diff --git a/firewood/src/db.rs b/firewood/src/db.rs
index ffc37a681..c1377c02d 100644
--- a/firewood/src/db.rs
+++ b/firewood/src/db.rs
@@ -811,7 +811,7 @@ impl Db {
             // create the sentinel node
             #[allow(clippy::unwrap_used)]
             db_header_ref
-                .write(|r| {
+                .modify(|r| {
                     err = (|| {
                         r.kv_root = merkle.init_root()?;
                         Ok(())
diff --git a/firewood/src/shale/compact.rs b/firewood/src/shale/compact.rs
index 7317bbe22..104de584e 100644
--- a/firewood/src/shale/compact.rs
+++ b/firewood/src/shale/compact.rs
@@ -279,17 +279,17 @@ impl CompactSpaceInner {
         #[allow(clippy::unwrap_used)]
         self.header
             .meta_space_tail
-            .write(|r| *r -= desc_size as usize)
+            .modify(|r| *r -= desc_size as usize)
             .unwrap();
 
         if desc_addr != DiskAddress(**self.header.meta_space_tail) {
             let desc_last = self.get_descriptor(*self.header.meta_space_tail.value)?;
             let mut desc = self.get_descriptor(desc_addr)?;
             #[allow(clippy::unwrap_used)]
-            desc.write(|r| *r = *desc_last).unwrap();
+            desc.modify(|r| *r = *desc_last).unwrap();
 
             let mut header = self.get_header(desc.haddr.into())?;
             #[allow(clippy::unwrap_used)]
-            header.write(|h| h.desc_addr = desc_addr).unwrap();
+            header.modify(|h| h.desc_addr = desc_addr).unwrap();
         }
 
         Ok(())
@@ -300,7 +300,7 @@ impl CompactSpaceInner {
         #[allow(clippy::unwrap_used)]
         self.header
             .meta_space_tail
-            .write(|r| *r += CompactDescriptor::MSIZE as usize)
+            .modify(|r| *r += CompactDescriptor::MSIZE as usize)
             .unwrap();
 
         Ok(DiskAddress(addr))
@@ -365,7 +365,7 @@ impl CompactSpaceInner {
         {
             let mut desc = self.get_descriptor(desc_addr)?;
             #[allow(clippy::unwrap_used)]
-            desc.write(|d| {
+            desc.modify(|d| {
                 d.payload_size = payload_size;
                 d.haddr = h as usize;
             })
@@ -374,14 +374,14 @@ impl CompactSpaceInner {
         let mut h = self.get_header(DiskAddress::from(h as usize))?;
         let mut f = self.get_footer(DiskAddress::from(f as usize))?;
         #[allow(clippy::unwrap_used)]
-        h.write(|h| {
+        h.modify(|h| {
             h.payload_size = payload_size;
             h.is_freed = true;
             h.desc_addr = desc_addr;
         })
         .unwrap();
         #[allow(clippy::unwrap_used)]
-        f.write(|f| f.payload_size = payload_size).unwrap();
+        f.modify(|f| f.payload_size = payload_size).unwrap();
 
         Ok(())
     }
@@ -417,7 +417,7 @@ impl CompactSpaceInner {
                     assert_eq!(header.payload_size as usize, desc_payload_size);
                     assert!(header.is_freed);
                     #[allow(clippy::unwrap_used)]
-                    header.write(|h| h.is_freed = false).unwrap();
+                    header.modify(|h| h.is_freed = false).unwrap();
                 }
                 self.del_desc(ptr)?;
                 true
@@ -429,7 +429,7 @@ impl CompactSpaceInner {
                     assert!(lheader.is_freed);
                     #[allow(clippy::unwrap_used)]
                     lheader
-                        .write(|h| {
+                        .modify(|h| {
                             h.is_freed = false;
                             h.payload_size = length;
                         })
@@ -440,7 +440,7 @@ impl CompactSpaceInner {
                         self.get_footer(DiskAddress::from(desc_haddr + hsize + length as usize))?;
                     //assert!(lfooter.payload_size == desc_payload_size);
                     #[allow(clippy::unwrap_used)]
-                    lfooter.write(|f| f.payload_size = length).unwrap();
+                    lfooter.modify(|f| f.payload_size = length).unwrap();
                 }
 
                 let offset = desc_haddr + hsize + length as usize + fsize;
@@ -450,7 +450,7 @@ impl CompactSpaceInner {
                 let mut rdesc = self.get_descriptor(rdesc_addr)?;
                 #[allow(clippy::unwrap_used)]
                 rdesc
-                    .write(|rd| {
+                    .modify(|rd| {
                         rd.payload_size = rpayload_size as u64;
                         rd.haddr = offset;
                     })
@@ -460,7 +460,7 @@ impl CompactSpaceInner {
                 let mut rheader = self.get_header(DiskAddress::from(offset))?;
                 #[allow(clippy::unwrap_used)]
                 rheader
-                    .write(|rh| {
+                    .modify(|rh| {
                         rh.is_freed = true;
                         rh.payload_size = rpayload_size as u64;
                         rh.desc_addr = rdesc_addr;
@@ -472,7 +472,7 @@ impl CompactSpaceInner {
                     self.get_footer(DiskAddress::from(offset + hsize + rpayload_size))?;
                 #[allow(clippy::unwrap_used)]
                 rfooter
-                    .write(|f| f.payload_size = rpayload_size as u64)
+                    .modify(|f| f.payload_size = rpayload_size as u64)
                     .unwrap();
             }
             self.del_desc(ptr)?;
@@ -482,7 +482,7 @@ impl CompactSpaceInner {
             };
             #[allow(clippy::unwrap_used)]
             if exit {
-                self.header.alloc_addr.write(|r| *r = ptr).unwrap();
+                self.header.alloc_addr.modify(|r| *r = ptr).unwrap();
                 res = Some((desc_haddr + hsize) as u64);
                 break;
             }
@@ -504,7 +504,7 @@ impl CompactSpaceInner {
         #[allow(clippy::unwrap_used)]
         self.header
             .compact_space_tail
-            .write(|r| {
+            .modify(|r| {
                 // an item is always fully in one region
                 let rem = regn_size - (offset & (regn_size - 1)).get();
                 if rem < total_length as usize {
@@ -517,14 +517,14 @@ impl CompactSpaceInner {
         let mut h = self.get_header(offset)?;
         let mut f = self.get_footer(offset + CompactHeader::MSIZE as usize + length as usize)?;
         #[allow(clippy::unwrap_used)]
-        h.write(|h| {
+        h.modify(|h| {
             h.payload_size = length;
             h.is_freed = false;
             h.desc_addr = DiskAddress::null();
         })
         .unwrap();
         #[allow(clippy::unwrap_used)]
-        f.write(|f| f.payload_size = length).unwrap();
+        f.modify(|f| f.payload_size = length).unwrap();
         #[allow(clippy::unwrap_used)]
         Ok((offset + CompactHeader::MSIZE as usize).0.unwrap().get() as u64)
     }
diff --git a/firewood/src/shale/mod.rs b/firewood/src/shale/mod.rs
index badfeef05..d74f6ffb8 100644
--- a/firewood/src/shale/mod.rs
+++ b/firewood/src/shale/mod.rs
@@ -54,8 +54,7 @@
 pub type SpaceId = u8;
 pub const INVALID_SPACE_ID: SpaceId = 0xff;
 
 pub struct DiskWrite {
-    pub space_id: SpaceId,
-    pub space_off: u64,
+    pub offset: u64,
     pub data: Box<[u8]>,
 }
@@ -63,9 +62,8 @@ impl std::fmt::Debug for DiskWrite {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> {
         write!(
             f,
-            "[Disk space=0x{:02x} offset=0x{:04x} data=0x{}",
-            self.space_id,
-            self.space_off,
+            "[Disk offset=0x{:04x} data=0x{}",
+            self.offset,
             hex::encode(&self.data)
         )
     }
@@ -105,13 +103,14 @@ pub trait CachedStore: Debug + Send + Sync {
     fn is_writeable(&self) -> bool;
 }
 
-/// A wrapper of `TypedView` to enable writes. The direct construction (by [Obj::from_typed_view]
+/// A wrapper of `StoredView` to enable writes. The direct construction (by [Obj::from_stored_view]
 /// or [StoredView::ptr_to_obj]) could be useful for some unsafe access to a low-level item (e.g.
 /// headers/metadata at bootstrap or part of [ShaleStore] implementation) stored at a given [DiskAddress]
 /// . Users of [ShaleStore] implementation, however, will only use [ObjRef] for safeguarded access.
 #[derive(Debug)]
 pub struct Obj<T: Storable> {
     value: StoredView<T>,
+    /// None if the object isn't dirty, otherwise the length of the serialized object.
     dirty: Option<u64>,
 }
 
@@ -121,13 +120,13 @@ impl Obj {
         DiskAddress(NonZeroUsize::new(self.value.get_offset()))
     }
 
-    /// Write to the underlying object. Returns `Ok(())` on success.
+    /// Modifies the value of this object and marks it as dirty.
     #[inline]
-    pub fn write(&mut self, modify: impl FnOnce(&mut T)) -> Result<(), ObjWriteSizeError> {
-        modify(self.value.write());
+    pub fn modify(&mut self, modify_func: impl FnOnce(&mut T)) -> Result<(), ObjWriteSizeError> {
+        modify_func(self.value.mut_item_ref());
 
-        // if `estimate_mem_image` gives overflow, the object will not be written
-        self.dirty = match self.value.estimate_mem_image() {
+        // if `serialized_len` gives overflow, the object will not be written
+        self.dirty = match self.value.serialized_len() {
             Some(len) => Some(len),
             None => return Err(ObjWriteSizeError),
         };
@@ -139,12 +138,7 @@ impl Obj {
     }
 
     #[inline(always)]
-    pub fn get_space_id(&self) -> SpaceId {
-        self.value.get_mem_store().id()
-    }
-
-    #[inline(always)]
-    pub const fn from_typed_view(value: StoredView<T>) -> Self {
+    pub const fn from_stored_view(value: StoredView<T>) -> Self {
        Obj { value, dirty: None }
    }
 
@@ -158,7 +152,7 @@ impl Obj {
         let mut new_value = vec![0; new_value_len as usize];
         // TODO: log error
         #[allow(clippy::unwrap_used)]
-        self.value.write_mem_image(&mut new_value).unwrap();
+        self.value.serialize(&mut new_value).unwrap();
         let offset = self.value.get_offset();
         let bx: &mut dyn CachedStore = self.value.get_mut_mem_store();
         bx.write(offset, &new_value).expect("write should succeed");
@@ -173,7 +167,7 @@ impl Obj {
             data: Vec::new().into(),
         };
 
-        std::mem::replace(&mut self.value.decoded, Node::from_leaf(empty_node))
+        std::mem::replace(&mut self.value.item, Node::from_leaf(empty_node))
     }
 }
 
@@ -207,7 +201,7 @@ impl<'a, T: Storable + Debug> ObjRef<'a, T> {
 
     #[inline]
     pub fn write(&mut self, modify: impl FnOnce(&mut T)) -> Result<(), ObjWriteSizeError> {
-        self.inner.write(modify)?;
+        self.inner.modify(modify)?;
 
         self.cache.lock().dirty.insert(self.inner.as_ptr());
 
@@ -292,29 +286,32 @@ pub trait Storable {
 }
 
 pub fn to_dehydrated(item: &dyn Storable) -> Result<Vec<u8>, ShaleError> {
-    let mut buff = vec![0; item.serialized_len() as usize];
-    item.serialize(&mut buff)?;
-    Ok(buff)
+    let mut buf = vec![0; item.serialized_len() as usize];
+    item.serialize(&mut buf)?;
+    Ok(buf)
 }
 
 /// A stored view of any [Storable]
 pub struct StoredView<T> {
-    decoded: T,
+    /// The item this stores.
+    item: T,
     mem: Box>,
     offset: usize,
+    /// If the serialized length of `item` is greater than this,
+    /// `serialized_len` will return `None`.
     len_limit: u64,
 }
 
 impl Debug for StoredView {
     fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
         let StoredView {
-            decoded,
+            item,
             offset,
             len_limit,
             mem: _,
         } = self;
         f.debug_struct("StoredView")
-            .field("decoded", decoded)
+            .field("item", item)
             .field("offset", offset)
             .field("len_limit", len_limit)
             .finish()
@@ -324,7 +321,7 @@ impl Debug for StoredView {
 impl Deref for StoredView {
     type Target = T;
     fn deref(&self) -> &T {
-        &self.decoded
+        &self.item
     }
 }
 
@@ -341,8 +338,9 @@ impl StoredView {
         &mut **self.mem
     }
 
-    fn estimate_mem_image(&self) -> Option<u64> {
-        let len = self.decoded.serialized_len();
+    /// Returns the serialized length of the item if it's less than the limit, otherwise `None`.
+    fn serialized_len(&self) -> Option<u64> {
+        let len = self.item.serialized_len();
         if len > self.len_limit {
             None
         } else {
@@ -350,23 +348,23 @@ impl StoredView {
         }
     }
 
-    fn write_mem_image(&self, mem_image: &mut [u8]) -> Result<(), ShaleError> {
-        self.decoded.serialize(mem_image)
+    fn serialize(&self, mem_image: &mut [u8]) -> Result<(), ShaleError> {
+        self.item.serialize(mem_image)
     }
 
-    fn write(&mut self) -> &mut T {
-        &mut self.decoded
+    fn mut_item_ref(&mut self) -> &mut T {
+        &mut self.item
     }
 }
 
 impl StoredView {
     #[inline(always)]
     fn new<U: CachedStore>(offset: usize, len_limit: u64, space: &U) -> Result<Self, ShaleError> {
-        let decoded = T::deserialize(offset, space)?;
+        let item = T::deserialize(offset, space)?;
         Ok(Self {
             offset,
-            decoded,
+            item,
             mem: space.get_shared(),
             len_limit,
         })
 
@@ -376,12 +374,12 @@ impl StoredView {
     fn from_hydrated(
         offset: usize,
         len_limit: u64,
-        decoded: T,
+        item: T,
         space: &dyn CachedStore,
     ) -> Result<Self, ShaleError> {
         Ok(Self {
             offset,
-            decoded,
+            item,
             mem: space.get_shared(),
             len_limit,
         })
@@ -393,7 +391,7 @@ impl StoredView {
         ptr: DiskAddress,
         len_limit: u64,
     ) -> Result<Obj<T>, ShaleError> {
-        Ok(Obj::from_typed_view(Self::new(
+        Ok(Obj::from_stored_view(Self::new(
             ptr.get(),
             len_limit,
             store,
@@ -405,10 +403,10 @@ impl StoredView {
         store: &dyn CachedStore,
         addr: usize,
         len_limit: u64,
-        decoded: T,
+        item: T,
     ) -> Result<Obj<T>, ShaleError> {
-        Ok(Obj::from_typed_view(Self::from_hydrated(
-            addr, len_limit, decoded, store,
+        Ok(Obj::from_stored_view(Self::from_hydrated(
+            addr, len_limit, item, store,
         )?))
     }
 }
 
@@ -417,12 +415,12 @@ impl StoredView {
     fn new_from_slice(
         offset: usize,
         len_limit: u64,
-        decoded: T,
+        item: T,
         space: &dyn CachedStore,
     ) -> Result<Self, ShaleError> {
         Ok(Self {
             offset,
-            decoded,
+            item,
             mem: space.get_shared(),
             len_limit,
         })
@@ -432,7 +430,7 @@ impl StoredView {
         s: &Obj<T>,
         offset: usize,
         length: u64,
-        decoded: U,
+        item: U,
     ) -> Result<Obj<U>, ShaleError> {
         let addr_ = s.value.get_offset() + offset;
         if s.dirty.is_some() {
@@ -442,7 +440,7 @@ impl StoredView {
                 error: "dirty write",
             });
         }
-        let r = StoredView::new_from_slice(addr_, length, decoded, s.value.get_mem_store())?;
+        let r = StoredView::new_from_slice(addr_, length, item, s.value.get_mem_store())?;
         Ok(Obj {
             value: r,
             dirty: None,
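As a quick reference for reviewers, the sketch below captures the dirty-tracking contract that the renamed `Obj::modify` and `StoredView::serialized_len` implement: apply the caller's closure to the decoded item, then cache the item's new serialized length, or surface the size error when the item would outgrow its reserved space. This is a self-contained approximation, not firewood code: `Blob`, `View`, `WriteSizeError`, and the `main` driver are hypothetical stand-ins, and the real types also carry the backing `CachedStore` and the flush step omitted here.

```rust
// Minimal, illustrative sketch of the `modify` + `serialized_len` contract.
// `WriteSizeError`, `Blob`, and `View` are stand-ins, not firewood types.

#[derive(Debug)]
struct WriteSizeError;

/// Stand-in for a decoded item that knows its serialized size.
#[derive(Debug)]
struct Blob(Vec<u8>);

impl Blob {
    fn serialized_len(&self) -> u64 {
        self.0.len() as u64
    }
}

/// Stand-in for `StoredView`: the decoded item plus the space it may occupy.
#[derive(Debug)]
struct View {
    item: Blob,
    len_limit: u64,
}

impl View {
    /// Mirrors `StoredView::serialized_len`: `None` once the item no longer fits.
    fn serialized_len(&self) -> Option<u64> {
        let len = self.item.serialized_len();
        if len > self.len_limit {
            None
        } else {
            Some(len)
        }
    }
}

/// Stand-in for `Obj`: `dirty` caches the serialized length of a pending write.
#[derive(Debug)]
struct Obj {
    value: View,
    dirty: Option<u64>,
}

impl Obj {
    /// Mirrors the renamed `Obj::modify`: mutate the item in place, then record its
    /// new serialized length, or report an error when it exceeds the reserved space.
    fn modify(&mut self, modify_func: impl FnOnce(&mut Blob)) -> Result<(), WriteSizeError> {
        modify_func(&mut self.value.item);
        self.dirty = match self.value.serialized_len() {
            Some(len) => Some(len),
            None => return Err(WriteSizeError),
        };
        Ok(())
    }
}

fn main() {
    let mut obj = Obj {
        value: View {
            item: Blob(vec![1, 2, 3]),
            len_limit: 4,
        },
        dirty: None,
    };

    // Still fits within `len_limit`: the object is marked dirty with the new length.
    assert!(obj.modify(|b| b.0.push(4)).is_ok());
    assert_eq!(obj.dirty, Some(4));

    // Growing past the limit surfaces an error; `dirty` keeps its last valid value.
    assert!(obj.modify(|b| b.0.extend_from_slice(&[5, 6])).is_err());
    assert_eq!(obj.dirty, Some(4));
}
```

Caching the serialized length in `dirty`, rather than a bare flag, lets the flush path allocate its write buffer up front (the `vec![0; new_value_len as usize]` hunk above) before serializing through the backing `CachedStore`.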