Make sure free space has a writeable CachedStore (#574)
rkuris authored Mar 6, 2024
1 parent 9a5b381 commit 5f63b81
Showing 13 changed files with 89 additions and 39 deletions.
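The heart of the change is in firewood/src/shale/mod.rs: CachedStore::write becomes fallible and the trait gains an is_writeable probe, so every caller in the hunks below either propagates the error with ? or unwraps it in tests and benches. Condensed from those hunks (other trait items such as get_view and get_shared are unchanged and omitted), the post-commit trait surface looks roughly like this:

    // Condensed from the firewood/src/shale/mod.rs hunk in this commit; not a complete listing.
    pub trait CachedStore: Debug + Send + Sync {
        /// Write `change` to the linear space starting at `offset`.
        /// Now fallible, so a store that cannot accept writes can refuse them.
        fn write(&mut self, offset: usize, change: &[u8]) -> Result<(), ShaleError>;
        /// Returns the identifier of this storage space.
        fn id(&self) -> SpaceId;
        /// Returns whether or not this store is writable.
        fn is_writeable(&self) -> bool;
    }

A matching ShaleError::ImmutableWrite variant is added for stores that must reject writes.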
2 changes: 1 addition & 1 deletion firewood/benches/hashops.rs
@@ -68,7 +68,7 @@ impl Profiler for FlamegraphProfiler {
fn bench_trie_hash(criterion: &mut Criterion) {
let mut to = [1u8; TRIE_HASH_LEN];
let mut store = DynamicMem::new(TRIE_HASH_LEN as u64, 0u8);
store.write(0, &*ZERO_HASH);
store.write(0, &*ZERO_HASH).expect("write should succeed");

#[allow(clippy::unwrap_used)]
criterion
2 changes: 1 addition & 1 deletion firewood/benches/shale-bench.rs
@@ -67,7 +67,7 @@ fn get_view<C: CachedStore>(b: &mut Bencher, mut cached: C) {

let offset = rng.gen_range(0..BENCH_MEM_SIZE - len);

cached.write(offset, rdata);
cached.write(offset, rdata).expect("write should succeed");
#[allow(clippy::unwrap_used)]
let view = cached
.get_view(offset, rdata.len().try_into().unwrap())
23 changes: 14 additions & 9 deletions firewood/src/db.rs
@@ -406,7 +406,8 @@ impl DbRev<MutStore> {
}

impl From<DbRev<MutStore>> for DbRev<SharedStore> {
fn from(value: DbRev<MutStore>) -> Self {
fn from(mut value: DbRev<MutStore>) -> Self {
value.flush_dirty();
DbRev {
header: value.header,
merkle: value.merkle.into(),
@@ -616,16 +617,20 @@ impl Db {
merkle: get_sub_universe_from_empty_delta(&data_cache.merkle),
};

let db_header_ref = Db::get_db_header_ref(&base.merkle.meta)?;
// convert the base merkle objects into writable ones
let meta: StoreRevMut = base.merkle.meta.clone().into();
let payload: StoreRevMut = base.merkle.payload.clone().into();

// get references to the DbHeader and the CompactSpaceHeader
// for free space management
let db_header_ref = Db::get_db_header_ref(&meta)?;
let merkle_payload_header_ref =
Db::get_payload_header_ref(&base.merkle.meta, Db::PARAM_SIZE + DbHeader::MSIZE)?;

Db::get_payload_header_ref(&meta, Db::PARAM_SIZE + DbHeader::MSIZE)?;
let header_refs = (db_header_ref, merkle_payload_header_ref);

let base_revision = Db::new_revision(
let base_revision = Db::new_revision::<StoreRevMut, _>(
header_refs,
(base.merkle.meta.clone(), base.merkle.payload.clone()),
(meta, payload),
params.payload_regn_nbit,
cfg.payload_max_walk,
&cfg.rev,
@@ -644,7 +649,7 @@ impl Db {
root_hashes: VecDeque::new(),
max_revisions: cfg.wal.max_revisions as usize,
base,
base_revision: Arc::new(base_revision),
base_revision: Arc::new(base_revision.into()),
})),
payload_regn_nbit: params.payload_regn_nbit,
metrics: Arc::new(DbMetrics::default()),
Expand Down Expand Up @@ -718,11 +723,11 @@ impl Db {
#[allow(clippy::unwrap_used)]
NonZeroUsize::new(SPACE_RESERVED as usize).unwrap(),
))?,
);
)?;
merkle_meta_store.write(
db_header.into(),
&shale::to_dehydrated(&DbHeader::new_empty())?,
);
)?;
}

let store = Universe {
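Taken together, the db.rs hunks change how the base revision is built: the shared base stores are first cloned into writable StoreRevMut wrappers, so the DbHeader and free-space CompactSpaceHeader references point at a store that can actually be written, and the revision is only downgraded to its shared form afterwards, with the new From impl flushing dirty pages first. A schematic of the new flow, abbreviated from the hunks above (argument lists shortened, not compilable as-is):

    // Schematic only; see the db.rs hunks above for the real call sites.
    let meta: StoreRevMut = base.merkle.meta.clone().into(); // writable view of the meta space
    let payload: StoreRevMut = base.merkle.payload.clone().into(); // writable view of the payload space
    let base_revision = Db::new_revision::<StoreRevMut, _>(header_refs, (meta, payload), /* ... */)?;
    // From<DbRev<MutStore>> for DbRev<SharedStore> now calls flush_dirty() before converting,
    // so nothing pending in the writable revision is lost on the downgrade.
    let base_revision = Arc::new(base_revision.into());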
2 changes: 1 addition & 1 deletion firewood/src/db/proposal.rs
@@ -225,7 +225,7 @@ impl Proposal {
.resize(max_revisions, TrieHash([0; TRIE_HASH_LEN]));
}

rev_inner.root_hash_staging.write(0, &hash.0);
rev_inner.root_hash_staging.write(0, &hash.0)?;
let (root_hash_redo, root_hash_wal) = rev_inner.root_hash_staging.delta();

// schedule writes to the disk
3 changes: 2 additions & 1 deletion firewood/src/merkle.rs
@@ -1423,7 +1423,8 @@ mod tests {
std::num::NonZeroUsize::new(RESERVED).unwrap(),
))
.unwrap(),
);
)
.unwrap();
let compact_header = shale::StoredView::ptr_to_obj(
&dm,
compact_header,
2 changes: 1 addition & 1 deletion firewood/src/merkle/node.rs
@@ -942,7 +942,7 @@ mod tests {
node.serialize(&mut bytes).expect("node should serialize");

let mut mem = PlainMem::new(serialized_len, 0);
mem.write(0, &bytes);
mem.write(0, &bytes).expect("write should succeed");

let mut hydrated_node = Node::deserialize(0, &mem).expect("node should deserialize");

3 changes: 2 additions & 1 deletion firewood/src/merkle_util.rs
@@ -63,7 +63,8 @@ where
NonZeroUsize::new(RESERVED).unwrap(),
))
.unwrap(),
);
)
.expect("write should succeed");
#[allow(clippy::unwrap_used)]
let compact_header =
StoredView::ptr_to_obj(&dm, compact_header, shale::compact::CompactHeader::MSIZE)
28 changes: 18 additions & 10 deletions firewood/src/shale/cached.rs
@@ -11,6 +11,8 @@ use std::{
#[cfg(test)]
pub use crate::shale::plainmem::PlainMem;

use super::ShaleError;

// Purely volatile, dynamically allocated vector-based implementation for
// [CachedStore]. This is similar to PlainMem (in testing). The only
// difference is, when [write] dynamically allocate more space if original
@@ -61,7 +63,7 @@ impl CachedStore for DynamicMem {
}))
}

fn write(&mut self, offset: usize, change: &[u8]) {
fn write(&mut self, offset: usize, change: &[u8]) -> Result<(), ShaleError> {
let length = change.len();
let size = offset + length;

@@ -73,12 +75,18 @@
space.resize(size, 0);
}
#[allow(clippy::indexing_slicing)]
space[offset..offset + length].copy_from_slice(change)
space[offset..offset + length].copy_from_slice(change);

Ok(())
}

fn id(&self) -> SpaceId {
self.id
}

fn is_writeable(&self) -> bool {
true
}
}

#[derive(Debug)]
@@ -122,14 +130,14 @@ mod tests {
fn test_plain_mem() {
let mut view = PlainMemShared(PlainMem::new(2, 0));
let mem = &mut *view;
mem.write(0, &[1, 1]);
mem.write(0, &[1, 2]);
mem.write(0, &[1, 1]).unwrap();
mem.write(0, &[1, 2]).unwrap();
#[allow(clippy::unwrap_used)]
let r = mem.get_view(0, 2).unwrap().as_deref();
assert_eq!(r, [1, 2]);

// previous view not mutated by write
mem.write(0, &[1, 3]);
mem.write(0, &[1, 3]).unwrap();
assert_eq!(r, [1, 2]);
let r = mem.get_view(0, 2).unwrap().as_deref();
assert_eq!(r, [1, 3]);
@@ -145,20 +153,20 @@
let mem = &mut *view;

// out of range
mem.write(1, &[7, 8]);
mem.write(1, &[7, 8]).unwrap();
}

#[test]
fn test_dynamic_mem() {
let mut view = DynamicMemShared(DynamicMem::new(2, 0));
let mem = &mut *view;
mem.write(0, &[1, 2]);
mem.write(0, &[3, 4]);
mem.write(0, &[1, 2]).unwrap();
mem.write(0, &[3, 4]).unwrap();
assert_eq!(mem.get_view(0, 2).unwrap().as_deref(), [3, 4]);
mem.get_shared().write(0, &[5, 6]);
mem.get_shared().write(0, &[5, 6]).unwrap();

// capacity is increased
mem.write(5, &[0; 10]);
mem.write(5, &[0; 10]).unwrap();

// get a view larger than recent growth
assert_eq!(mem.get_view(3, 20).unwrap().as_deref(), [0; 20]);
3 changes: 2 additions & 1 deletion firewood/src/shale/compact.rs
@@ -726,7 +726,8 @@ mod tests {
reserved.0.unwrap(),
))
.unwrap(),
);
)
.unwrap();
let compact_header =
StoredView::ptr_to_obj(&dm, compact_header, CompactHeader::MSIZE).unwrap();
let mem_meta = dm;
12 changes: 10 additions & 2 deletions firewood/src/shale/mod.rs
@@ -39,6 +39,8 @@ pub enum ShaleError {
InvalidCacheView { offset: usize, size: u64 },
#[error("io error: {0}")]
Io(#[from] std::io::Error),
#[error("Write on immutable cache")]
ImmutableWrite,
}

// TODO:
@@ -95,9 +97,12 @@
fn get_shared(&self) -> Box<dyn SendSyncDerefMut<Target = dyn CachedStore>>;
/// Write the `change` to the portion of the linear space starting at `offset`. The change
/// should be immediately visible to all `CachedView` associated to this linear space.
fn write(&mut self, offset: usize, change: &[u8]);
fn write(&mut self, offset: usize, change: &[u8]) -> Result<(), ShaleError>;
/// Returns the identifier of this storage space.
fn id(&self) -> SpaceId;

/// Returns whether or not this store is writable
fn is_writeable(&self) -> bool;
}

/// A wrapper of `TypedView` to enable writes. The direct construction (by [Obj::from_typed_view]
@@ -127,6 +132,9 @@ impl<T: Storable> Obj<T> {
None => return Err(ObjWriteSizeError),
};

// catch writes that cannot be flushed early during debugging
debug_assert!(self.value.get_mem_store().is_writeable());

Ok(())
}

@@ -153,7 +161,7 @@ impl<T: Storable> Obj<T> {
self.value.write_mem_image(&mut new_value).unwrap();
let offset = self.value.get_offset();
let bx: &mut dyn CachedStore = self.value.get_mut_mem_store();
bx.write(offset, &new_value);
bx.write(offset, &new_value).expect("write should succeed");
}
}
}
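Both in-memory stores touched by this commit (DynamicMem above, PlainMem below) report is_writeable as true and return Ok(()) from write. For a read-only space the intended pattern is presumably the inverse: report false and refuse writes with the new error variant. A minimal sketch with a hypothetical ReadOnlyMem type that is not part of this commit (get_view and get_shared omitted):

    // Hypothetical read-only store, shown only to illustrate how the new trait
    // methods and the ImmutableWrite variant fit together.
    impl CachedStore for ReadOnlyMem {
        fn write(&mut self, _offset: usize, _change: &[u8]) -> Result<(), ShaleError> {
            // A store that cannot accept writes reports the new error variant
            // instead of silently dropping the data.
            Err(ShaleError::ImmutableWrite)
        }

        fn id(&self) -> SpaceId {
            self.id
        }

        fn is_writeable(&self) -> bool {
            // Lets the debug_assert added to Obj in the hunk above catch
            // unflushable writes early.
            false
        }
    }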
9 changes: 7 additions & 2 deletions firewood/src/shale/plainmem.rs
@@ -7,7 +7,7 @@ use std::{
sync::{Arc, RwLock},
};

use super::{CachedStore, CachedView, SendSyncDerefMut, SpaceId};
use super::{CachedStore, CachedView, SendSyncDerefMut, ShaleError, SpaceId};

/// in-memory vector-based implementation for [CachedStore] for testing
// built on [ShaleStore](super::ShaleStore) in memory, without having to write
@@ -54,17 +54,22 @@ impl CachedStore for PlainMem {
}))
}

fn write(&mut self, offset: usize, change: &[u8]) {
fn write(&mut self, offset: usize, change: &[u8]) -> Result<(), ShaleError> {
let length = change.len();
#[allow(clippy::unwrap_used)]
let mut vect = self.space.deref().write().unwrap();
#[allow(clippy::indexing_slicing)]
vect.as_mut_slice()[offset..offset + length].copy_from_slice(change);
Ok(())
}

fn id(&self) -> SpaceId {
self.id
}

fn is_writeable(&self) -> bool {
true
}
}

#[derive(Debug)]
12 changes: 6 additions & 6 deletions firewood/src/storage/buffer.rs
@@ -711,14 +711,14 @@ mod tests {
let change = b"this is a test";

// write to the in memory buffer not to disk
mut_store.write(0, change);
mut_store.write(0, change).unwrap();
assert_eq!(mut_store.id(), STATE_SPACE);

// mutate the in memory buffer.
let change = b"this is another test";

// write to the in memory buffer (ash) not yet to disk
mut_store.write(0, change);
mut_store.write(0, change).unwrap();
assert_eq!(mut_store.id(), STATE_SPACE);

// wal should have no records.
@@ -789,7 +789,7 @@
let hash: [u8; HASH_SIZE] = sha3::Keccak256::digest(data).into();

// write to the in memory buffer (ash) not yet to disk
mut_store.write(0, &hash);
mut_store.write(0, &hash).unwrap();
assert_eq!(mut_store.id(), STATE_SPACE);

// wal should have no records.
@@ -871,15 +871,15 @@
// mutate the in memory buffer.
let data = b"this is a test";
let hash: [u8; HASH_SIZE] = sha3::Keccak256::digest(data).into();
block_in_place(|| store.write(0, &hash));
block_in_place(|| store.write(0, &hash)).unwrap();
assert_eq!(store.id(), STATE_SPACE);

let another_data = b"this is another test";
let another_hash: [u8; HASH_SIZE] = sha3::Keccak256::digest(another_data).into();

// mutate the in memory buffer in another StoreRev new from the above.
let mut another_store = StoreRevMut::new_from_other(&store);
block_in_place(|| another_store.write(32, &another_hash));
block_in_place(|| another_store.write(32, &another_hash)).unwrap();
assert_eq!(another_store.id(), STATE_SPACE);

// wal should have no records.
@@ -902,7 +902,7 @@
assert_eq!(view.as_deref(), empty);

// Overwrite the value from the beginning in the new store. Only the new store should see the change.
another_store.write(0, &another_hash);
another_store.write(0, &another_hash).unwrap();
let view = another_store.get_view(0, HASH_SIZE as u64).unwrap();
assert_eq!(view.as_deref(), another_hash);
let view = store.get_view(0, HASH_SIZE as u64).unwrap();
(Diff for 1 remaining changed file not shown.)
