From 80805dcf1d355312e559cfa5d8ebed8bc36e3fb6 Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Tue, 26 Mar 2024 10:02:44 -0400 Subject: [PATCH] rename space to store (#612) --- firewood/src/config.rs | 2 +- firewood/src/db.rs | 104 ++++++++++++------------- firewood/src/db/proposal.rs | 26 +++---- firewood/src/lib.rs | 58 +++++++------- firewood/src/merkle.rs | 4 +- firewood/src/merkle_util.rs | 4 +- firewood/src/shale/compact.rs | 100 ++++++++++++------------ firewood/src/shale/in_mem.rs | 34 ++++----- firewood/src/shale/mod.rs | 30 ++++---- firewood/src/storage/buffer.rs | 100 ++++++++++++------------ firewood/src/storage/mod.rs | 134 ++++++++++++++++----------------- fwdctl/src/create.rs | 2 +- growth-ring/src/wal.rs | 4 +- 13 files changed, 301 insertions(+), 301 deletions(-) diff --git a/firewood/src/config.rs b/firewood/src/config.rs index 6a13e4ffd..9cd35a477 100644 --- a/firewood/src/config.rs +++ b/firewood/src/config.rs @@ -18,7 +18,7 @@ pub struct DbConfig { #[builder(default = 22)] // 4MB file by default pub meta_file_nbit: u64, /// Maximum cached pages for the item stash. This is the low-level cache used by the linear - /// space that holds Trie nodes and account objects. + /// store that holds Trie nodes and account objects. #[builder(default = 262144)] // 1G total size by default pub payload_ncached_pages: usize, /// Maximum cached file descriptors for the item stash. diff --git a/firewood/src/db.rs b/firewood/src/db.rs index 21678e1e0..6bbdc7273 100644 --- a/firewood/src/db.rs +++ b/firewood/src/db.rs @@ -14,7 +14,7 @@ use crate::{ }, storage::{ buffer::{DiskBuffer, DiskBufferRequester}, - CachedSpace, MemStoreR, SpaceWrite, StoreConfig, StoreDelta, StoreRevMut, StoreRevShared, + CachedStore, MemStoreR, StoreConfig, StoreDelta, StoreRevMut, StoreRevShared, StoreWrite, ZeroStore, PAGE_SIZE_NBIT, }, v2::api::{self, HashKey, KeyType, ValueType}, @@ -23,7 +23,7 @@ use crate::{ merkle, shale::{ self, compact::StoreHeader, disk_address::DiskAddress, LinearStore, Obj, ShaleError, - SpaceId, Storable, StoredView, + Storable, StoreId, StoredView, }, }; use aiofut::AioError; @@ -51,10 +51,10 @@ mod proposal; use self::proposal::ProposalBase; -const MERKLE_META_SPACE: SpaceId = 0x0; -const MERKLE_PAYLOAD_SPACE: SpaceId = 0x1; -const ROOT_HASH_SPACE: SpaceId = 0x2; -const SPACE_RESERVED: u64 = 0x1000; +const MERKLE_META_STORE_ID: StoreId = 0x0; +const MERKLE_PAYLOAD_STORE_ID: StoreId = 0x1; +const ROOT_HASH_STORE_ID: StoreId = 0x2; +const RESERVED_STORE_ID: u64 = 0x1000; const MAGIC_STR: &[u8; 16] = b"firewood v0.1\0\0\0"; @@ -118,7 +118,7 @@ struct DbParams { } #[derive(Clone, Debug)] -/// Necessary linear space instances bundled for a `Store`. +/// Necessary linear store instances bundled for a `Store`. 
+/// Necessary linear store instances bundled for a `Store`.
struct SubUniverse { meta: T, payload: T, @@ -151,8 +151,8 @@ impl SubUniverse { impl SubUniverse> { fn rewind( &self, - meta_writes: &[SpaceWrite], - payload_writes: &[SpaceWrite], + meta_writes: &[StoreWrite], + payload_writes: &[StoreWrite], ) -> SubUniverse { SubUniverse::new( StoreRevShared::from_ash(self.meta.clone(), meta_writes), @@ -161,7 +161,7 @@ impl SubUniverse> { } } -impl SubUniverse> { +impl SubUniverse> { fn to_mem_store_r(&self) -> SubUniverse> { SubUniverse { meta: self.meta.clone(), @@ -171,7 +171,7 @@ impl SubUniverse> { } fn get_sub_universe_from_deltas( - sub_universe: &SubUniverse>, + sub_universe: &SubUniverse>, meta_delta: StoreDelta, payload_delta: StoreDelta, ) -> SubUniverse { @@ -182,7 +182,7 @@ fn get_sub_universe_from_deltas( } fn get_sub_universe_from_empty_delta( - sub_universe: &SubUniverse>, + sub_universe: &SubUniverse>, ) -> SubUniverse { get_sub_universe_from_deltas(sub_universe, StoreDelta::default(), StoreDelta::default()) } @@ -234,7 +234,7 @@ impl Storable for DbHeader { } #[derive(Clone, Debug)] -/// Necessary linear space instances bundled for the state of the entire DB. +/// Necessary linear store instances bundled for the state of the entire DB. struct Universe { merkle: SubUniverse, } @@ -255,7 +255,7 @@ impl Universe { } } -impl Universe> { +impl Universe> { fn to_mem_store_r(&self) -> Universe> { Universe { merkle: self.merkle.to_mem_store_r(), @@ -266,8 +266,8 @@ impl Universe> { impl Universe> { fn rewind( &self, - merkle_meta_writes: &[SpaceWrite], - merkle_payload_writes: &[SpaceWrite], + merkle_meta_writes: &[StoreWrite], + merkle_payload_writes: &[StoreWrite], ) -> Universe { Universe { merkle: self @@ -417,8 +417,8 @@ impl From> for DbRev { struct DbInner { disk_requester: DiskBufferRequester, disk_thread: Option>, - cached_space: Universe>, - // Whether to reset the store headers when creating a new store on top of the cached space. + cached_store: Universe>, + // Whether to reset the store headers when creating a new store on top of the cached store. 
reset_store_headers: bool, root_hash_staging: StoreRevMut, } @@ -511,7 +511,7 @@ impl Db { let merkle_payload_path = file::touch_dir("compact", &merkle_path)?; let root_hash_path = file::touch_dir("root_hash", &db_path)?; - let meta_file = crate::file::File::new(0, SPACE_RESERVED, &merkle_meta_path)?; + let meta_file = crate::file::File::new(0, RESERVED_STORE_ID, &merkle_meta_path)?; let meta_fd = meta_file.as_fd(); if reset_store_headers { @@ -552,11 +552,11 @@ impl Db { // set up caches #[allow(clippy::unwrap_used)] - let root_hash_cache: Arc = CachedSpace::new( + let root_hash_cache: Arc = CachedStore::new( &StoreConfig::builder() .ncached_pages(cfg.root_hash_ncached_pages) .ncached_files(cfg.root_hash_ncached_files) - .space_id(ROOT_HASH_SPACE) + .store_id(ROOT_HASH_STORE_ID) .file_nbit(params.root_hash_file_nbit) .rootdir(root_hash_path) .build(), @@ -567,12 +567,12 @@ impl Db { #[allow(clippy::unwrap_used)] let data_cache = Universe { - merkle: SubUniverse::>::new( - CachedSpace::new( + merkle: SubUniverse::>::new( + CachedStore::new( &StoreConfig::builder() .ncached_pages(cfg.meta_ncached_pages) .ncached_files(cfg.meta_ncached_files) - .space_id(MERKLE_META_SPACE) + .store_id(MERKLE_META_STORE_ID) .file_nbit(params.meta_file_nbit) .rootdir(merkle_meta_path) .build(), @@ -580,11 +580,11 @@ impl Db { ) .unwrap() .into(), - CachedSpace::new( + CachedStore::new( &StoreConfig::builder() .ncached_pages(cfg.payload_ncached_pages) .ncached_files(cfg.payload_ncached_files) - .space_id(MERKLE_PAYLOAD_SPACE) + .store_id(MERKLE_PAYLOAD_STORE_ID) .file_nbit(params.payload_file_nbit) .rootdir(merkle_payload_path) .build(), @@ -601,8 +601,8 @@ impl Db { root_hash_cache.as_ref(), ] .into_iter() - .for_each(|cached_space| { - disk_requester.reg_cached_space(cached_space.id(), cached_space.clone_files()); + .for_each(|cached_store| { + disk_requester.reg_cached_store(cached_store.id(), cached_store.clone_files()); }); // recover from Wal @@ -635,7 +635,7 @@ impl Db { inner: Arc::new(RwLock::new(DbInner { disk_thread, disk_requester, - cached_space: data_cache, + cached_store: data_cache, reset_store_headers, root_hash_staging: StoreRevMut::new(root_hash_cache), })), @@ -678,9 +678,9 @@ impl Db { }) .chain({ // write out the StoreHeader - let space_reserved = - NonZeroUsize::new(SPACE_RESERVED as usize).expect("SPACE_RESERVED is non-zero"); - csh = StoreHeader::new(space_reserved, space_reserved); + let store_reserved = NonZeroUsize::new(RESERVED_STORE_ID as usize) + .expect("RESERVED_STORE_ID is non-zero"); + csh = StoreHeader::new(store_reserved, store_reserved); bytemuck::bytes_of(&csh) }) .copied() @@ -693,7 +693,7 @@ impl Db { /// Create a new mutable store and an alterable revision of the DB on top. 
fn new_store( &self, - cached_space: &Universe>, + cached_store: &Universe>, reset_store_headers: bool, ) -> Result<(Universe, DbRev), DbError> { let mut offset = Db::PARAM_SIZE as usize; @@ -701,9 +701,9 @@ impl Db { offset += DbHeader::MSIZE as usize; let merkle_payload_header: DiskAddress = DiskAddress::from(offset); offset += StoreHeader::SERIALIZED_LEN as usize; - assert!(offset <= SPACE_RESERVED as usize); + assert!(offset <= RESERVED_STORE_ID as usize); - let mut merkle_meta_store = StoreRevMut::new(cached_space.merkle.meta.clone()); + let mut merkle_meta_store = StoreRevMut::new(cached_store.merkle.meta.clone()); if reset_store_headers { // initialize store headers @@ -711,9 +711,9 @@ impl Db { merkle_meta_store.write( merkle_payload_header.into(), &shale::to_dehydrated(&shale::compact::StoreHeader::new( - NonZeroUsize::new(SPACE_RESERVED as usize).unwrap(), + NonZeroUsize::new(RESERVED_STORE_ID as usize).unwrap(), #[allow(clippy::unwrap_used)] - NonZeroUsize::new(SPACE_RESERVED as usize).unwrap(), + NonZeroUsize::new(RESERVED_STORE_ID as usize).unwrap(), ))?, )?; merkle_meta_store.write( @@ -725,7 +725,7 @@ impl Db { let store = Universe { merkle: SubUniverse::new( merkle_meta_store, - StoreRevMut::new(cached_space.merkle.payload.clone()), + StoreRevMut::new(cached_store.merkle.payload.clone()), ), }; @@ -777,7 +777,7 @@ impl Db { // TODO: This should be a compile time check const DB_OFFSET: u64 = Db::PARAM_SIZE; let merkle_offset = DB_OFFSET + DbHeader::MSIZE; - assert!(merkle_offset + StoreHeader::SERIALIZED_LEN <= SPACE_RESERVED); + assert!(merkle_offset + StoreHeader::SERIALIZED_LEN <= RESERVED_STORE_ID); let mut db_header_ref = header_refs.0; let merkle_payload_header_ref = header_refs.1; @@ -786,7 +786,7 @@ impl Db { let merkle_payload = merkle.1.into(); #[allow(clippy::unwrap_used)] - let merkle_space = shale::compact::Store::new( + let merkle_store = shale::compact::Store::new( merkle_meta, merkle_payload, merkle_payload_header_ref, @@ -796,7 +796,7 @@ impl Db { ) .unwrap(); - let merkle = Merkle::new(merkle_space); + let merkle = Merkle::new(merkle_store); if db_header_ref.kv_root.is_null() { let mut err = Ok(()); @@ -826,7 +826,7 @@ impl Db { ) -> Result { let mut inner = self.inner.write(); let reset_store_headers = inner.reset_store_headers; - let (store, mut rev) = self.new_store(&inner.cached_space, reset_store_headers)?; + let (store, mut rev) = self.new_store(&inner.cached_store, reset_store_headers)?; // Flip the reset flag after resetting the store headers. 
if reset_store_headers { @@ -900,7 +900,7 @@ impl Db { StoreRevShared::from_ash( Arc::new(ZeroStore::default()), #[allow(clippy::indexing_slicing)] - &ash.0[&ROOT_HASH_SPACE].redo, + &ash.0[&ROOT_HASH_STORE_ID].redo, ) }) .map(|root_hash_store| { @@ -928,22 +928,22 @@ impl Db { let u = match revisions.inner.back() { Some(u) => u.to_mem_store_r().rewind( #[allow(clippy::indexing_slicing)] - &ash.0[&MERKLE_META_SPACE].undo, + &ash.0[&MERKLE_META_STORE_ID].undo, #[allow(clippy::indexing_slicing)] - &ash.0[&MERKLE_PAYLOAD_SPACE].undo, + &ash.0[&MERKLE_PAYLOAD_STORE_ID].undo, ), - None => inner_lock.cached_space.to_mem_store_r().rewind( + None => inner_lock.cached_store.to_mem_store_r().rewind( #[allow(clippy::indexing_slicing)] - &ash.0[&MERKLE_META_SPACE].undo, + &ash.0[&MERKLE_META_STORE_ID].undo, #[allow(clippy::indexing_slicing)] - &ash.0[&MERKLE_PAYLOAD_SPACE].undo, + &ash.0[&MERKLE_PAYLOAD_STORE_ID].undo, ), }; revisions.inner.push_back(u); } } - let space = if nback == 0 { + let store = if nback == 0 { &revisions.base } else { #[allow(clippy::indexing_slicing)] @@ -953,11 +953,11 @@ impl Db { drop(inner_lock); #[allow(clippy::unwrap_used)] - let db_header_ref = Db::get_db_header_ref(&space.merkle.meta).unwrap(); + let db_header_ref = Db::get_db_header_ref(&store.merkle.meta).unwrap(); #[allow(clippy::unwrap_used)] let merkle_payload_header_ref = - Db::get_payload_header_ref(&space.merkle.meta, Db::PARAM_SIZE + DbHeader::MSIZE) + Db::get_payload_header_ref(&store.merkle.meta, Db::PARAM_SIZE + DbHeader::MSIZE) .unwrap(); let header_refs = (db_header_ref, merkle_payload_header_ref); @@ -965,7 +965,7 @@ impl Db { #[allow(clippy::unwrap_used)] Db::new_revision( header_refs, - (space.merkle.meta.clone(), space.merkle.payload.clone()), + (store.merkle.meta.clone(), store.merkle.payload.clone()), self.payload_regn_nbit, 0, &self.cfg.rev, diff --git a/firewood/src/db/proposal.rs b/firewood/src/db/proposal.rs index e6c96ff2f..5eba71df7 100644 --- a/firewood/src/db/proposal.rs +++ b/firewood/src/db/proposal.rs @@ -3,7 +3,7 @@ use super::{ get_sub_universe_from_deltas, Db, DbConfig, DbError, DbHeader, DbInner, DbRev, DbRevInner, - Universe, MERKLE_META_SPACE, MERKLE_PAYLOAD_SPACE, ROOT_HASH_SPACE, + Universe, MERKLE_META_STORE_ID, MERKLE_PAYLOAD_STORE_ID, ROOT_HASH_STORE_ID, }; use crate::merkle::{Bincode, MerkleKeyValueStream, Proof}; use crate::shale::LinearStore; @@ -171,21 +171,21 @@ impl Proposal { } }; - // clear the staging layer and apply changes to the CachedSpace + // clear the staging layer and apply changes to the CachedStore let (merkle_payload_redo, merkle_payload_wal) = store.merkle.payload.delta(); let (merkle_meta_redo, merkle_meta_wal) = store.merkle.meta.delta(); let mut rev_inner = m.write(); #[allow(clippy::unwrap_used)] let merkle_meta_undo = rev_inner - .cached_space + .cached_store .merkle .meta .update(&merkle_meta_redo) .unwrap(); #[allow(clippy::unwrap_used)] let merkle_payload_undo = rev_inner - .cached_space + .cached_store .merkle .payload .update(&merkle_payload_redo) @@ -194,7 +194,7 @@ impl Proposal { // update the rolling window of past revisions let latest_past = Universe { merkle: get_sub_universe_from_deltas( - &rev_inner.cached_space.merkle, + &rev_inner.cached_store.merkle, merkle_meta_undo, merkle_payload_undo, ), @@ -204,10 +204,10 @@ impl Proposal { if let Some(rev) = revisions.inner.front_mut() { rev.merkle .meta - .set_base_space(latest_past.merkle.meta.inner().clone()); + .set_base_store(latest_past.merkle.meta.inner().clone()); rev.merkle .payload - 
.set_base_space(latest_past.merkle.payload.inner().clone()); + .set_base_store(latest_past.merkle.payload.inner().clone()); } revisions.inner.push_front(latest_past); while revisions.inner.len() > max_revisions { @@ -231,23 +231,23 @@ impl Proposal { rev_inner.disk_requester.write( Box::new([ BufferWrite { - space_id: store.merkle.payload.id(), + store_id: store.merkle.payload.id(), delta: merkle_payload_redo, }, BufferWrite { - space_id: store.merkle.meta.id(), + store_id: store.merkle.meta.id(), delta: merkle_meta_redo, }, BufferWrite { - space_id: rev_inner.root_hash_staging.id(), + store_id: rev_inner.root_hash_staging.id(), delta: root_hash_redo, }, ]), AshRecord( [ - (MERKLE_META_SPACE, merkle_meta_wal), - (MERKLE_PAYLOAD_SPACE, merkle_payload_wal), - (ROOT_HASH_SPACE, root_hash_wal), + (MERKLE_META_STORE_ID, merkle_meta_wal), + (MERKLE_PAYLOAD_STORE_ID, merkle_payload_wal), + (ROOT_HASH_STORE_ID, root_hash_wal), ] .into(), ), diff --git a/firewood/src/lib.rs b/firewood/src/lib.rs index d72b76721..c8f57a904 100644 --- a/firewood/src/lib.rs +++ b/firewood/src/lib.rs @@ -69,12 +69,12 @@ //! Firewood is built by three layers of abstractions that totally decouple the //! layout/representation of the data on disk from the actual logical data structure it retains: //! -//! - Linear, memory-like space: the `shale` crate offers a `CachedStore` abstraction for a -//! (64-bit) byte-addressable space that abstracts away the intricate method that actually persists +//! - Linear, memory-like store: the `shale` crate offers a `CachedStore` abstraction for a +//! (64-bit) byte-addressable store that abstracts away the intricate method that actually persists //! the in-memory data on the secondary storage medium (e.g., hard drive). The implementor of `CachedStore` //! provides the functions to give the user of `CachedStore` an illusion that the user is operating upon a -//! byte-addressable memory space. It is just a "magical" array of bytes one can view and change -//! that is mirrored to the disk. In reality, the linear space will be chunked into files under a +//! byte-addressable memory store. It is just a "magical" array of bytes one can view and change +//! that is mirrored to the disk. In reality, the linear store will be chunked into files under a //! directory, but the user does not have to even know about this. //! //! - Persistent item storage stash: `CompactStore` in `shale` defines a pool of typed objects that are @@ -87,9 +87,9 @@ //! and maintain the code. //! //! Given the abstraction, one can easily realize the fact that the actual data that affect the -//! state of the data structure (trie) is what the linear space (`CachedStore`) keeps track of. That is, +//! state of the data structure (trie) is what the linear store (`CachedStore`) keeps track of. That is, //! a flat but conceptually large byte vector. In other words, given a valid byte vector as the -//! content of the linear space, the higher level data structure can be *uniquely* determined, there +//! content of the linear store, the higher level data structure can be *uniquely* determined, there //! is nothing more (except for some auxiliary data that are kept for performance reasons, such as caching) //! or less than that, like a way to interpret the bytes. This nice property allows us to completely //! separate the logical data from its physical representation, greatly simplifies the storage @@ -99,18 +99,18 @@ //! //! ## Page-based Shadowing and Revisions //! -//! 
Following the idea that the tries are just a view of a linear byte space, all writes made to the +//! Following the idea that the tries are just a view of a linear byte store, all writes made to the //! tries inside Firewood will eventually be consolidated into some interval writes to the linear -//! space. The writes may overlap and some frequent writes are even done to the same spot in the -//! space. To reduce the overhead and be friendly to the disk, we partition the entire 64-bit -//! virtual space into pages (yeah it appears to be more and more like an OS) and keep track of the +//! store. The writes may overlap and some frequent writes are even done to the same spot in the +//! store. To reduce the overhead and be friendly to the disk, we partition the entire 64-bit +//! virtual store into pages (yeah it appears to be more and more like an OS) and keep track of the //! dirty pages in some `CachedStore` instantiation (see `storage::StoreRevMut`). When a //! [`db::Proposal`] commits, both the recorded interval writes and the aggregated in-memory -//! dirty pages induced by this write batch are taken out from the linear space. Although they are +//! dirty pages induced by this write batch are taken out from the linear store. Although they are //! mathematically equivalent, interval writes are more compact than pages (which are 4K in size, //! become dirty even if a single byte is touched upon) . So interval writes are fed into the WAL //! subsystem (supported by growthring). After the WAL record is written (one record per write batch), -//! the dirty pages are then pushed to the on-disk linear space to mirror the change by some +//! the dirty pages are then pushed to the on-disk linear store to mirror the change by some //! asynchronous, out-of-order file writes. See the `BufferCmd::WriteBatch` part of `DiskBuffer::process` //! for the detailed logic. //! @@ -120,15 +120,15 @@ //! memory, then: //! //! - Bring the necessary pages that contain the accessed nodes into the memory and cache them -//! (`storage::CachedSpace`). +//! (`storage::CachedStore`). //! //! - Make changes to the trie, and that induces the writes to some nodes. The nodes are either //! already cached in memory (its pages are cached, or its handle `ObjRef` is still in //! `shale::ObjCache`) or need to be brought into the memory (if that's the case, go back to the //! second step for it). //! -//! - Writes to nodes are converted into interval writes to the stagging `StoreRevMut` space that -//! overlays atop `CachedSpace`, so all dirty pages during the current write batch will be +//! - Writes to nodes are converted into interval writes to the stagging `StoreRevMut` store that +//! overlays atop `CachedStore`, so all dirty pages during the current write batch will be //! exactly captured in `StoreRevMut` (see `StoreRevMut::delta`). //! //! - Finally: @@ -139,18 +139,18 @@ //! //! - Commit: otherwise, the write batch is committed, the interval writes (`storage::Ash`) will be bundled //! into a single WAL record (`storage::AshRecord`) and sent to WAL subsystem, before dirty pages -//! are scheduled to be written to the space files. Also the dirty pages are applied to the -//! underlying `CachedSpace`. `StoreRevMut` becomes empty again for further write batches. +//! are scheduled to be written to the store files. Also the dirty pages are applied to the +//! underlying `CachedStore`. `StoreRevMut` becomes empty again for further write batches. //! -//! 
Parts of the following diagram show this normal flow, the "staging" space (implemented by +//! Parts of the following diagram show this normal flow, the "staging" store (implemented by //! `StoreRevMut`) concept is a bit similar to the staging area in Git, which enables the handling //! of (resuming from) write errors, clean abortion of an on-going write batch so the entire store //! state remains intact, and also reduces unnecessary premature disk writes. Essentially, we -//! copy-on-write pages in the space that are touched upon, without directly mutating the -//! underlying "master" space. The staging space is just a collection of these "shadowing" pages +//! copy-on-write pages in the store that are touched upon, without directly mutating the +//! underlying "master" store. The staging store is just a collection of these "shadowing" pages //! and a reference to the its base (master) so any reads could partially hit those dirty pages //! and/or fall through to the base, whereas all writes are captured. Finally, when things go well, -//! we "push down" these changes to the base and clear up the staging space. +//! we "push down" these changes to the base and clear up the staging store. //! //!
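To make the staging flow above concrete, here is a minimal sketch of pushing one staged write batch down to the base store, built only from the renamed types in this patch (`StoreRevMut`, `CachedStore`, `BufferWrite`, `AshRecord`); the `state_cache` and `disk_requester` values are assumed to be set up as in the buffer.rs tests later in this diff, so treat it as an illustration rather than a definitive API.

// Minimal staging sketch (assumed setup: state_cache: Arc<CachedStore>, disk_requester: DiskBufferRequester).
let mut staging = StoreRevMut::new(state_cache.clone());
staging.write(0, b"hello world").unwrap();            // captured as shadow pages plus interval writes
let (redo_delta, wal) = staging.delta();              // dirty pages and the WAL (Ash) record
let _undo = state_cache.update(&redo_delta).unwrap(); // push shadow pages down; the undo pages back a past revision
disk_requester.write(
    Box::new([BufferWrite { store_id: state_cache.id(), delta: redo_delta }]),
    AshRecord([(state_cache.id(), wal)].into()),
);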

//! @@ -161,25 +161,25 @@ //! shows previously logged write batch records could be kept even though they are no longer needed //! for the purpose of crash recovery. The interval writes from a record can be aggregated into //! pages (see `storage::StoreDelta::new`) and used to reconstruct a "ghost" image of past -//! revision of the linear space (just like how staging space works, except that the ghost space is +//! revision of the linear store (just like how staging store works, except that the ghost store is //! essentially read-only once constructed). The shadow pages there will function as some -//! "rewinding" changes to patch the necessary locations in the linear space, while the rest of the -//! linear space is very likely untouched by that historical write batch. +//! "rewinding" changes to patch the necessary locations in the linear store, while the rest of the +//! linear store is very likely untouched by that historical write batch. //! //! Then, with the three-layer abstraction we previously talked about, a historical trie could be //! derived. In fact, because there is no mandatory traversal or scanning in the process, the //! only cost to revive a historical state from the log is to just playback the records and create -//! those shadow pages. There is very little additional cost because the ghost space is summoned on an +//! those shadow pages. There is very little additional cost because the ghost store is summoned on an //! on-demand manner while one accesses the historical trie. //! //! In the other direction, when new write batches are committed, the system moves forward, we can //! therefore maintain a rolling window of past revisions in memory with *zero* cost. The -//! mid-bottom of the diagram shows when a write batch is committed, the persisted (master) space goes one -//! step forward, the staging space is cleared, and an extra ghost space (colored in purple) can be +//! mid-bottom of the diagram shows when a write batch is committed, the persisted (master) store goes one +//! step forward, the staging store is cleared, and an extra ghost store (colored in purple) can be //! created to hold the version of the store before the commit. The backward delta is applied to //! counteract the change that has been made to the persisted store, which is also a set of shadow pages. -//! No change is required for other historical ghost space instances. Finally, we can phase out -//! some very old ghost space to keep the size of the rolling window invariant. +//! No change is required for other historical ghost store instances. Finally, we can phase out +//! some very old ghost store to keep the size of the rolling window invariant. //! 
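To complement the description of ghost revisions, a minimal sketch of how a past revision is summoned from WAL records, mirroring `Db::get_revision` in this diff; `ash` stands for a recovered `AshRecord` and `inner` for the `DbInner` shown earlier, so the surrounding setup is assumed.

// Rebuild a read-only "ghost" revision by replaying the undo writes of a committed batch
// over the current cached store (mirrors Db::get_revision in this diff).
let ghost: Universe<StoreRevShared> = inner.cached_store.to_mem_store_r().rewind(
    &ash.0[&MERKLE_META_STORE_ID].undo,
    &ash.0[&MERKLE_PAYLOAD_STORE_ID].undo,
);
// Only touched pages are shadowed; all other reads fall through to the base store,
// so constructing the revision is cheap and happens on demand.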
pub mod db; pub(crate) mod file; diff --git a/firewood/src/merkle.rs b/firewood/src/merkle.rs index 588cc4c24..2c8be8225 100644 --- a/firewood/src/merkle.rs +++ b/firewood/src/merkle.rs @@ -1434,11 +1434,11 @@ mod tests { let mem_payload = InMemLinearStore::new(0x10000, 0x1); let cache = shale::ObjCache::new(1); - let space = + let store = shale::compact::Store::new(mem_meta, mem_payload, compact_header, cache, 10, 16) .expect("Store init fail"); - Merkle::new(space) + Merkle::new(store) } pub(super) fn create_test_merkle() -> Merkle { diff --git a/firewood/src/merkle_util.rs b/firewood/src/merkle_util.rs index e62b78459..36492e291 100644 --- a/firewood/src/merkle_util.rs +++ b/firewood/src/merkle_util.rs @@ -71,11 +71,11 @@ where let mem_payload = InMemLinearStore::new(compact_size, 0x1); let cache = shale::ObjCache::new(1); - let space = + let store = shale::compact::Store::new(mem_meta, mem_payload, compact_header, cache, 10, 16) .expect("Store init fail"); - let merkle = Merkle::new(space); + let merkle = Merkle::new(store); #[allow(clippy::unwrap_used)] let root = merkle.init_root().unwrap(); diff --git a/firewood/src/shale/compact.rs b/firewood/src/shale/compact.rs index c5da7f654..d661333ef 100644 --- a/firewood/src/shale/compact.rs +++ b/firewood/src/shale/compact.rs @@ -162,41 +162,41 @@ impl Storable for ChunkDescriptor { #[repr(C)] #[derive(Copy, Clone, Debug, Pod, Zeroable)] pub struct StoreHeader { - meta_space_tail: DiskAddress, - data_space_tail: DiskAddress, + meta_store_tail: DiskAddress, + data_store_tail: DiskAddress, base_addr: DiskAddress, alloc_addr: DiskAddress, } #[derive(Debug)] struct StoreHeaderSliced { - meta_space_tail: Obj, - data_space_tail: Obj, + meta_store_tail: Obj, + data_store_tail: Obj, base_addr: Obj, alloc_addr: Obj, } impl StoreHeaderSliced { fn flush_dirty(&mut self) { - self.meta_space_tail.flush_dirty(); - self.data_space_tail.flush_dirty(); + self.meta_store_tail.flush_dirty(); + self.data_store_tail.flush_dirty(); self.base_addr.flush_dirty(); self.alloc_addr.flush_dirty(); } } impl StoreHeader { - const META_SPACE_TAIL_OFFSET: usize = 0; - const DATA_SPACE_TAIL_OFFSET: usize = DiskAddress::SERIALIZED_LEN as usize; + const META_STORE_TAIL_OFFSET: usize = 0; + const DATA_STORE_TAIL_OFFSET: usize = DiskAddress::SERIALIZED_LEN as usize; const BASE_ADDR_OFFSET: usize = - Self::DATA_SPACE_TAIL_OFFSET + DiskAddress::SERIALIZED_LEN as usize; + Self::DATA_STORE_TAIL_OFFSET + DiskAddress::SERIALIZED_LEN as usize; const ALLOC_ADDR_OFFSET: usize = Self::BASE_ADDR_OFFSET + DiskAddress::SERIALIZED_LEN as usize; pub const SERIALIZED_LEN: u64 = Self::ALLOC_ADDR_OFFSET as u64 + DiskAddress::SERIALIZED_LEN; pub const fn new(meta_base: NonZeroUsize, compact_base: NonZeroUsize) -> Self { Self { - meta_space_tail: DiskAddress::new(meta_base), - data_space_tail: DiskAddress::new(compact_base), + meta_store_tail: DiskAddress::new(meta_base), + data_store_tail: DiskAddress::new(compact_base), base_addr: DiskAddress::new(meta_base), alloc_addr: DiskAddress::new(meta_base), } @@ -204,17 +204,17 @@ impl StoreHeader { fn into_fields(r: Obj) -> Result { Ok(StoreHeaderSliced { - meta_space_tail: StoredView::slice( + meta_store_tail: StoredView::slice( &r, - Self::META_SPACE_TAIL_OFFSET, + Self::META_STORE_TAIL_OFFSET, DiskAddress::SERIALIZED_LEN, - r.meta_space_tail, + r.meta_store_tail, )?, - data_space_tail: StoredView::slice( + data_store_tail: StoredView::slice( &r, - Self::DATA_SPACE_TAIL_OFFSET, + Self::DATA_STORE_TAIL_OFFSET, DiskAddress::SERIALIZED_LEN, - 
r.data_space_tail, + r.data_store_tail, )?, base_addr: StoredView::slice( &r, @@ -241,11 +241,11 @@ impl Storable for StoreHeader { size: Self::SERIALIZED_LEN, })?; #[allow(clippy::indexing_slicing)] - let meta_space_tail = raw.as_deref()[..Self::DATA_SPACE_TAIL_OFFSET] + let meta_store_tail = raw.as_deref()[..Self::DATA_STORE_TAIL_OFFSET] .try_into() .expect("Self::MSIZE = 4 * DiskAddress::MSIZE"); #[allow(clippy::indexing_slicing)] - let data_space_tail = raw.as_deref()[Self::DATA_SPACE_TAIL_OFFSET..Self::BASE_ADDR_OFFSET] + let data_store_tail = raw.as_deref()[Self::DATA_STORE_TAIL_OFFSET..Self::BASE_ADDR_OFFSET] .try_into() .expect("Self::MSIZE = 4 * DiskAddress::MSIZE"); #[allow(clippy::indexing_slicing)] @@ -257,8 +257,8 @@ impl Storable for StoreHeader { .try_into() .expect("Self::MSIZE = 4 * DiskAddress::MSIZE"); Ok(Self { - meta_space_tail, - data_space_tail, + meta_store_tail, + data_store_tail, base_addr, alloc_addr, }) @@ -270,8 +270,8 @@ impl Storable for StoreHeader { fn serialize(&self, to: &mut [u8]) -> Result<(), ShaleError> { let mut cur = Cursor::new(to); - cur.write_all(&self.meta_space_tail.to_le_bytes())?; - cur.write_all(&self.data_space_tail.to_le_bytes())?; + cur.write_all(&self.meta_store_tail.to_le_bytes())?; + cur.write_all(&self.data_store_tail.to_le_bytes())?; cur.write_all(&self.base_addr.to_le_bytes())?; cur.write_all(&self.alloc_addr.to_le_bytes())?; Ok(()) @@ -280,8 +280,8 @@ impl Storable for StoreHeader { #[derive(Debug)] struct StoreInner { - meta_space: M, - data_space: M, + meta_store: M, + data_store: M, header: StoreHeaderSliced, alloc_max_walk: u64, regn_nbit: u64, @@ -290,8 +290,8 @@ struct StoreInner { impl From> for StoreInner { fn from(value: StoreInner) -> StoreInner { StoreInner { - meta_space: value.meta_space.into(), - data_space: value.data_space.into(), + meta_store: value.meta_store.into(), + data_store: value.data_store.into(), header: value.header, alloc_max_walk: value.alloc_max_walk, regn_nbit: value.regn_nbit, @@ -301,7 +301,7 @@ impl From> for StoreInner { impl StoreInner { fn get_descriptor(&self, ptr: DiskAddress) -> Result, ShaleError> { - StoredView::ptr_to_obj(&self.meta_space, ptr, ChunkDescriptor::SERIALIZED_LEN) + StoredView::ptr_to_obj(&self.meta_store, ptr, ChunkDescriptor::SERIALIZED_LEN) } fn get_data_ref( @@ -309,7 +309,7 @@ impl StoreInner { ptr: DiskAddress, len_limit: u64, ) -> Result, ShaleError> { - StoredView::ptr_to_obj(&self.data_space, ptr, len_limit) + StoredView::ptr_to_obj(&self.data_store, ptr, len_limit) } fn get_header(&self, ptr: DiskAddress) -> Result, ShaleError> { @@ -326,12 +326,12 @@ impl StoreInner { // debug_assert!((desc_addr.0 - self.header.base_addr.value.into()) % desc_size == 0); #[allow(clippy::unwrap_used)] self.header - .meta_space_tail + .meta_store_tail .modify(|r| *r -= desc_size as usize) .unwrap(); - if desc_addr != DiskAddress(**self.header.meta_space_tail) { - let desc_last = self.get_descriptor(*self.header.meta_space_tail.value)?; + if desc_addr != DiskAddress(**self.header.meta_store_tail) { + let desc_last = self.get_descriptor(*self.header.meta_store_tail.value)?; let mut desc = self.get_descriptor(desc_addr)?; #[allow(clippy::unwrap_used)] desc.modify(|r| *r = *desc_last).unwrap(); @@ -344,10 +344,10 @@ impl StoreInner { } fn new_desc(&mut self) -> Result { - let addr = **self.header.meta_space_tail; + let addr = **self.header.meta_store_tail; #[allow(clippy::unwrap_used)] self.header - .meta_space_tail + .meta_store_tail .modify(|r| *r += ChunkDescriptor::SERIALIZED_LEN as 
usize) .unwrap(); @@ -387,7 +387,7 @@ impl StoreInner { let mut f = offset; #[allow(clippy::unwrap_used)] - if offset + ChunkFooter::SERIALIZED_LEN < self.header.data_space_tail.unwrap().get() as u64 + if offset + ChunkFooter::SERIALIZED_LEN < self.header.data_store_tail.unwrap().get() as u64 && (regn_size - (offset & (regn_size - 1))) >= ChunkFooter::SERIALIZED_LEN + ChunkHeader::SERIALIZED_LEN { @@ -440,7 +440,7 @@ impl StoreInner { const FOOTER_SIZE: usize = ChunkFooter::SERIALIZED_LEN as usize; const DESCRIPTOR_SIZE: usize = ChunkDescriptor::SERIALIZED_LEN as usize; - let tail = *self.header.meta_space_tail; + let tail = *self.header.meta_store_tail; if tail == *self.header.base_addr { return Ok(None); } @@ -550,10 +550,10 @@ impl StoreInner { fn alloc_new(&mut self, length: u64) -> Result { let regn_size = 1 << self.regn_nbit; let total_length = ChunkHeader::SERIALIZED_LEN + length + ChunkFooter::SERIALIZED_LEN; - let mut offset = *self.header.data_space_tail; + let mut offset = *self.header.data_store_tail; #[allow(clippy::unwrap_used)] self.header - .data_space_tail + .data_store_tail .modify(|r| { // an item is always fully in one region let rem = regn_size - (offset & (regn_size - 1)).get(); @@ -602,8 +602,8 @@ pub struct Store { impl Store { pub fn new( - meta_space: M, - data_space: M, + meta_store: M, + data_store: M, header: Obj, obj_cache: super::ObjCache, alloc_max_walk: u64, @@ -611,8 +611,8 @@ impl Store { ) -> Result { let cs = Store { inner: RwLock::new(StoreInner { - meta_space, - data_space, + meta_store, + data_store, header: StoreHeader::into_fields(header)?, alloc_max_walk, regn_nbit, @@ -645,9 +645,9 @@ impl Store { #[allow(clippy::unwrap_used)] let obj = { let inner = self.inner.read().unwrap(); - let data_space = &inner.data_space; + let data_store = &inner.data_store; #[allow(clippy::unwrap_used)] - let view = StoredView::item_to_obj(data_space, addr.try_into().unwrap(), size, item)?; + let view = StoredView::item_to_obj(data_store, addr.try_into().unwrap(), size, item)?; self.obj_cache.put(view) }; @@ -761,14 +761,14 @@ mod tests { } #[test] - fn test_space_item() { + fn test_store_item() { let meta_size: NonZeroUsize = NonZeroUsize::new(0x10000).unwrap(); let compact_size: NonZeroUsize = NonZeroUsize::new(0x10000).unwrap(); let reserved: DiskAddress = 0x1000.into(); let mut dm = InMemLinearStore::new(meta_size.get() as u64, 0x0); - // initialize compact space + // initialize compact store let compact_header = DiskAddress::from(0x1); dm.write( compact_header.unwrap().get(), @@ -782,15 +782,15 @@ mod tests { let mem_payload = InMemLinearStore::new(compact_size.get() as u64, 0x1); let cache: ObjCache = ObjCache::new(1); - let space = Store::new(mem_meta, mem_payload, compact_header, cache, 10, 16).unwrap(); + let store = Store::new(mem_meta, mem_payload, compact_header, cache, 10, 16).unwrap(); // initial write let data = b"hello world"; let hash: [u8; HASH_SIZE] = sha3::Keccak256::digest(data).into(); - let obj_ref = space.put_item(Hash(hash), 0).unwrap(); + let obj_ref = store.put_item(Hash(hash), 0).unwrap(); assert_eq!(obj_ref.as_ptr(), DiskAddress::from(4113)); // create hash ptr from address and attempt to read dirty write. 
- let hash_ref = space.get_item(DiskAddress::from(4113)).unwrap(); + let hash_ref = store.get_item(DiskAddress::from(4113)).unwrap(); // read before flush results in zeroed hash assert_eq!(hash_ref.as_ref(), ZERO_HASH.as_ref()); // not cached @@ -815,7 +815,7 @@ mod tests { drop(obj_ref); // write is visible assert_eq!( - space.get_item(DiskAddress::from(4113)).unwrap().as_ref(), + store.get_item(DiskAddress::from(4113)).unwrap().as_ref(), hash ); } diff --git a/firewood/src/shale/in_mem.rs b/firewood/src/shale/in_mem.rs index 78e0c4cb9..8fea38497 100644 --- a/firewood/src/shale/in_mem.rs +++ b/firewood/src/shale/in_mem.rs @@ -1,7 +1,7 @@ // Copyright (C) 2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE.md for licensing terms. -use crate::shale::{LinearStore, LinearStoreView, SendSyncDerefMut, SpaceId}; +use crate::shale::{LinearStore, LinearStoreView, SendSyncDerefMut, StoreId}; use std::{ fmt::Debug, ops::{Deref, DerefMut}, @@ -14,14 +14,14 @@ use super::ShaleError; // [CachedStore]. Allocates more space on `write` if original size isn't enough. #[derive(Debug)] pub struct InMemLinearStore { - space: Arc>>, - id: SpaceId, + store: Arc>>, + id: StoreId, } impl InMemLinearStore { - pub fn new(size: u64, id: SpaceId) -> Self { - let space = Arc::new(RwLock::new(vec![0; size as usize])); - Self { space, id } + pub fn new(size: u64, id: StoreId) -> Self { + let store = Arc::new(RwLock::new(vec![0; size as usize])); + Self { store, id } } } @@ -34,18 +34,18 @@ impl LinearStore for InMemLinearStore { let length = length as usize; let size = offset + length; #[allow(clippy::unwrap_used)] - let mut space = self.space.write().unwrap(); + let mut store = self.store.write().unwrap(); // Increase the size if the request range exceeds the current limit. - if size > space.len() { - space.resize(size, 0); + if size > store.len() { + store.resize(size, 0); } Some(Box::new(InMemLinearStoreView { offset, length, mem: Self { - space: self.space.clone(), + store: self.store.clone(), id: self.id, }, })) @@ -53,7 +53,7 @@ impl LinearStore for InMemLinearStore { fn get_shared(&self) -> Box> { Box::new(InMemLinearStoreShared(Self { - space: self.space.clone(), + store: self.store.clone(), id: self.id, })) } @@ -63,19 +63,19 @@ impl LinearStore for InMemLinearStore { let size = offset + length; #[allow(clippy::unwrap_used)] - let mut space = self.space.write().unwrap(); + let mut store = self.store.write().unwrap(); // Increase the size if the request range exceeds the current limit. 
- if size > space.len() { - space.resize(size, 0); + if size > store.len() { + store.resize(size, 0); } #[allow(clippy::indexing_slicing)] - space[offset..offset + length].copy_from_slice(change); + store[offset..offset + length].copy_from_slice(change); Ok(()) } - fn id(&self) -> SpaceId { + fn id(&self) -> StoreId { self.id } @@ -100,7 +100,7 @@ impl LinearStoreView for InMemLinearStoreView { fn as_deref(&self) -> Self::DerefReturn { #[allow(clippy::indexing_slicing, clippy::unwrap_used)] - self.mem.space.read().unwrap()[self.offset..self.offset + self.length].to_vec() + self.mem.store.read().unwrap()[self.offset..self.offset + self.length].to_vec() } } diff --git a/firewood/src/shale/mod.rs b/firewood/src/shale/mod.rs index de7b756b5..a290fe415 100644 --- a/firewood/src/shale/mod.rs +++ b/firewood/src/shale/mod.rs @@ -45,11 +45,11 @@ pub enum ShaleError { // this could probably included with ShaleError, // but keeping it separate for now as Obj/ObjRef might change in the near future #[derive(Debug, Error)] -#[error("object cannot be written in the space provided")] +#[error("object cannot be written in the store provided")] pub struct ObjWriteSizeError; -pub type SpaceId = u8; -pub const INVALID_SPACE_ID: SpaceId = 0xff; +pub type StoreId = u8; +pub const INVALID_STORE_ID: StoreId = 0xff; /// A handle that pins and provides a readable access to a portion of a [LinearStore]. pub trait LinearStoreView { @@ -61,7 +61,7 @@ pub trait SendSyncDerefMut: DerefMut + Send + Sync {} impl SendSyncDerefMut for T {} -/// In-memory store that offers access to intervals from a linear byte space, which is usually +/// In-memory store that offers access to intervals from a linear byte store, which is usually /// backed by a cached/memory-mapped pool of the accessed intervals from the underlying linear /// persistent store. Reads may trigger disk reads to bring data into memory, but writes will /// *only* be visible in memory -- they do not write to disk. @@ -77,12 +77,12 @@ pub trait LinearStore: Debug + Send + Sync { /// Returns a handle that allows shared access to this store. fn get_shared(&self) -> Box>; - /// Write the `change` to the linear space starting at `offset`. The change should - /// be immediately visible to all `LinearStoreView` associated with this linear space. + /// Write the `change` to the linear store starting at `offset`. The change should + /// be immediately visible to all `LinearStoreView` associated with this linear store. fn write(&mut self, offset: usize, change: &[u8]) -> Result<(), ShaleError>; - /// Returns the identifier of this storage space. - fn id(&self) -> SpaceId; + /// Returns the identifier of this store. 
+ fn id(&self) -> StoreId; /// Returns whether or not this store is writable fn is_writeable(&self) -> bool; @@ -330,13 +330,13 @@ impl StoredView { impl StoredView { #[inline(always)] - fn new(offset: usize, len_limit: u64, space: &U) -> Result { - let item = T::deserialize(offset, space)?; + fn new(offset: usize, len_limit: u64, store: &U) -> Result { + let item = T::deserialize(offset, store)?; Ok(Self { offset, item, - mem: space.get_shared(), + mem: store.get_shared(), len_limit, }) } @@ -346,12 +346,12 @@ impl StoredView { offset: usize, len_limit: u64, item: T, - space: &dyn LinearStore, + store: &dyn LinearStore, ) -> Result { Ok(Self { offset, item, - mem: space.get_shared(), + mem: store.get_shared(), len_limit, }) } @@ -387,12 +387,12 @@ impl StoredView { offset: usize, len_limit: u64, item: T, - space: &dyn LinearStore, + store: &dyn LinearStore, ) -> Result { Ok(Self { offset, item, - mem: space.get_shared(), + mem: store.get_shared(), len_limit, }) } diff --git a/firewood/src/storage/buffer.rs b/firewood/src/storage/buffer.rs index 75295d296..23fd753be 100644 --- a/firewood/src/storage/buffer.rs +++ b/firewood/src/storage/buffer.rs @@ -11,7 +11,7 @@ use std::sync::Arc; use std::{cell::RefCell, collections::HashMap}; use super::{AshRecord, FilePool, Page, StoreDelta, StoreError, WalConfig, PAGE_SIZE_NBIT}; -use crate::shale::SpaceId; +use crate::shale::StoreId; use crate::storage::DeltaPage; use aiofut::{AioBuilder, AioError, AioManager}; use futures::future::join_all; @@ -40,10 +40,10 @@ pub enum BufferCmd { /// Process a write batch against the underlying store. WriteBatch(BufferWrites, AshRecord), /// Get a page from the disk buffer. - GetPage((SpaceId, u64), oneshot::Sender>), + GetPage((StoreId, u64), oneshot::Sender>), CollectAsh(usize, oneshot::Sender>), - /// Register a new space and add the files to a memory mapped pool. - RegCachedSpace(SpaceId, Arc), + /// Register a new store and add the files to a memory mapped pool. + RegCachedStore(StoreId, Arc), /// Returns false if the Shutdown, } @@ -77,7 +77,7 @@ pub struct DiskBufferConfig { /// List of pages to write to disk. #[derive(Debug)] pub struct BufferWrite { - pub space_id: SpaceId, + pub store_id: StoreId, pub delta: StoreDelta, } @@ -212,12 +212,12 @@ struct WalQueueMax { /// Add an pending pages to aio manager for processing by the local pool. 
fn schedule_write( - pending: Rc>>, + pending: Rc>>, fc_notifier: Rc, file_pools: Rc>; 255]>>, aiomgr: Rc, max: WalQueueMax, - page_key: (SpaceId, u64), + page_key: (StoreId, u64), ) { use std::collections::hash_map::Entry::*; @@ -299,12 +299,12 @@ async fn init_wal( |raw, _| { let batch = AshRecord::deserialize(raw); - for (space_id, ash) in batch.0 { + for (store_id, ash) in batch.0 { for (undo, redo) in ash.iter() { let offset = undo.offset; let file_pools = file_pools.borrow(); #[allow(clippy::unwrap_used, clippy::indexing_slicing)] - let file_pool = file_pools[space_id as usize].as_ref().unwrap(); + let file_pool = file_pools[store_id as usize].as_ref().unwrap(); let file_nbit = file_pool.get_file_nbit(); let file_mask = (1 << file_nbit) - 1; let fid = offset >> file_nbit; @@ -343,7 +343,7 @@ async fn init_wal( async fn run_wal_queue( max: WalQueueMax, wal: Rc>>, - pending: Rc>>, + pending: Rc>>, file_pools: Rc>; 255]>>, mut writes: mpsc::Receiver<(BufferWrites, AshRecord)>, fc_notifier: Rc, @@ -382,9 +382,9 @@ async fn run_wal_queue( let sem = Rc::new(tokio::sync::Semaphore::new(0)); let mut npermit = 0; - for BufferWrite { space_id, delta } in bwrites { + for BufferWrite { store_id, delta } in bwrites { for DeltaPage(page_id, page) in delta.0 { - let page_key = (space_id, page_id); + let page_key = (store_id, page_id); let should_write = match pending.borrow_mut().entry(page_key) { Occupied(mut e) => { @@ -461,7 +461,7 @@ fn panic_on_intialization_failure_with<'a, T>( #[allow(clippy::too_many_arguments)] async fn process( - pending: Rc>>, + pending: Rc>>, fc_notifier: Rc, file_pools: Rc>; 255]>>, aiomgr: Rc, @@ -544,11 +544,11 @@ async fn process( #[allow(clippy::unwrap_used)] tx.send(ash).unwrap(); } - BufferCmd::RegCachedSpace(space_id, files) => { + BufferCmd::RegCachedStore(store_id, files) => { file_pools .borrow_mut() .as_mut_slice() - .index_mut(space_id as usize) + .index_mut(store_id as usize) .replace(files); } } @@ -581,10 +581,10 @@ impl DiskBufferRequester { } /// Get a page from the buffer. - pub fn get_page(&self, space_id: SpaceId, page_id: u64) -> Option { + pub fn get_page(&self, store_id: StoreId, page_id: u64) -> Option { let (resp_tx, resp_rx) = oneshot::channel(); self.sender - .send(BufferCmd::GetPage((space_id, page_id), resp_tx)) + .send(BufferCmd::GetPage((store_id, page_id), resp_tx)) .map_err(StoreError::Send) .ok(); #[allow(clippy::unwrap_used)] @@ -625,10 +625,10 @@ impl DiskBufferRequester { block_in_place(|| resp_rx.blocking_recv().map_err(StoreError::Receive)) } - /// Register a cached space to the buffer. - pub fn reg_cached_space(&self, space_id: SpaceId, files: Arc) { + /// Register a cached store to the buffer. 
+ pub fn reg_cached_store(&self, store_id: StoreId, files: Arc) { self.sender - .send(BufferCmd::RegCachedSpace(space_id, files)) + .send(BufferCmd::RegCachedStore(store_id, files)) .map_err(StoreError::Send) .ok(); } @@ -644,12 +644,12 @@ mod tests { use crate::{ file, storage::{ - Ash, CachedSpace, MemStoreR, StoreConfig, StoreRevMut, StoreRevMutDelta, + Ash, CachedStore, MemStoreR, StoreConfig, StoreRevMut, StoreRevMutDelta, StoreRevShared, ZeroStore, }, }; - const STATE_SPACE: SpaceId = 0x0; + const STATE_STORE_ID: StoreId = 0x0; const HASH_SIZE: usize = 32; fn get_tmp_dir() -> PathBuf { @@ -659,15 +659,15 @@ mod tests { .join("firewood") } - fn new_cached_space_for_test( + fn new_cached_store_for_test( state_path: PathBuf, disk_requester: DiskBufferRequester, - ) -> Arc { - CachedSpace::new( + ) -> Arc { + CachedStore::new( &StoreConfig::builder() .ncached_pages(1) .ncached_files(1) - .space_id(STATE_SPACE) + .store_id(STATE_STORE_ID) .file_nbit(1) .rootdir(state_path) .build(), @@ -697,11 +697,11 @@ mod tests { disk_requester.init_wal("wal", &root_db_path); // create a new state cache which tracks on disk state. - let state_cache = new_cached_space_for_test(state_path, disk_requester.clone()); + let state_cache = new_cached_store_for_test(state_path, disk_requester.clone()); - // add an in memory cached space. this will allow us to write to the + // add an in memory cached store. this will allow us to write to the // disk buffer then later persist the change to disk. - disk_requester.reg_cached_space(state_cache.id(), state_cache.inner.read().files.clone()); + disk_requester.reg_cached_store(state_cache.id(), state_cache.inner.read().files.clone()); // memory mapped store let mut mut_store = StoreRevMut::new(state_cache); @@ -710,14 +710,14 @@ mod tests { // write to the in memory buffer not to disk mut_store.write(0, change).unwrap(); - assert_eq!(mut_store.id(), STATE_SPACE); + assert_eq!(mut_store.id(), STATE_STORE_ID); // mutate the in memory buffer. let change = b"this is another test"; // write to the in memory buffer (ash) not yet to disk mut_store.write(0, change).unwrap(); - assert_eq!(mut_store.id(), STATE_SPACE); + assert_eq!(mut_store.id(), STATE_STORE_ID); // wal should have no records. assert!(disk_requester.collect_ash(1).unwrap().is_empty()); @@ -734,7 +734,7 @@ mod tests { // wal is empty assert!(d1.collect_ash(1).unwrap().is_empty()); // page is not yet persisted to disk. - assert!(d1.get_page(STATE_SPACE, 0).is_none()); + assert!(d1.get_page(STATE_STORE_ID, 0).is_none()); d1.write(page_batch, write_batch); }); // wait for the write to complete. @@ -773,11 +773,11 @@ mod tests { disk_requester.init_wal("wal", &root_db_path); // create a new state cache which tracks on disk state. - let state_cache = new_cached_space_for_test(state_path, disk_requester.clone()); + let state_cache = new_cached_store_for_test(state_path, disk_requester.clone()); - // add an in memory cached space. this will allow us to write to the + // add an in memory cached store. this will allow us to write to the // disk buffer then later persist the change to disk. 
- disk_requester.reg_cached_space(state_cache.id(), state_cache.clone_files()); + disk_requester.reg_cached_store(state_cache.id(), state_cache.clone_files()); // memory mapped store let mut mut_store = StoreRevMut::new(state_cache.clone()); @@ -788,7 +788,7 @@ mod tests { // write to the in memory buffer (ash) not yet to disk mut_store.write(0, &hash).unwrap(); - assert_eq!(mut_store.id(), STATE_SPACE); + assert_eq!(mut_store.id(), STATE_STORE_ID); // wal should have no records. assert!(disk_requester.collect_ash(1).unwrap().is_empty()); @@ -798,7 +798,7 @@ mod tests { assert_eq!(view.as_deref(), hash); // Commit the change. Take the delta from cached store, - // then apply changes to the CachedSpace. + // then apply changes to the CachedStore. let (redo_delta, wal) = mut_store.delta(); state_cache.update(&redo_delta).unwrap(); @@ -806,13 +806,13 @@ mod tests { // wal is empty assert!(disk_requester.collect_ash(1).unwrap().is_empty()); // page is not yet persisted to disk. - assert!(disk_requester.get_page(STATE_SPACE, 0).is_none()); + assert!(disk_requester.get_page(STATE_STORE_ID, 0).is_none()); disk_requester.write( Box::new([BufferWrite { - space_id: STATE_SPACE, + store_id: STATE_STORE_ID, delta: redo_delta, }]), - AshRecord([(STATE_SPACE, wal)].into()), + AshRecord([(STATE_STORE_ID, wal)].into()), ); // verify @@ -822,7 +822,7 @@ mod tests { // replay the redo from the wal let shared_store = StoreRevShared::from_ash( Arc::new(ZeroStore::default()), - &ashes[0].0[&STATE_SPACE].redo, + &ashes[0].0[&STATE_STORE_ID].redo, ); let view = shared_store.get_view(0, hash.len() as u64).unwrap(); assert_eq!(view.as_deref(), hash); @@ -846,11 +846,11 @@ mod tests { disk_requester.init_wal("wal", &root_db_path); // create a new state cache which tracks on disk state. - let state_cache: Arc = CachedSpace::new( + let state_cache: Arc = CachedStore::new( &StoreConfig::builder() .ncached_pages(1) .ncached_files(1) - .space_id(STATE_SPACE) + .store_id(STATE_STORE_ID) .file_nbit(1) .rootdir(state_path) .build(), @@ -859,9 +859,9 @@ mod tests { .unwrap() .into(); - // add an in memory cached space. this will allow us to write to the + // add an in memory cached store. this will allow us to write to the // disk buffer then later persist the change to disk. - disk_requester.reg_cached_space(state_cache.id(), state_cache.clone_files()); + disk_requester.reg_cached_store(state_cache.id(), state_cache.clone_files()); // memory mapped store let mut store = StoreRevMut::new(state_cache.clone()); @@ -870,7 +870,7 @@ mod tests { let data = b"this is a test"; let hash: [u8; HASH_SIZE] = sha3::Keccak256::digest(data).into(); block_in_place(|| store.write(0, &hash)).unwrap(); - assert_eq!(store.id(), STATE_SPACE); + assert_eq!(store.id(), STATE_STORE_ID); let another_data = b"this is another test"; let another_hash: [u8; HASH_SIZE] = sha3::Keccak256::digest(another_data).into(); @@ -878,7 +878,7 @@ mod tests { // mutate the in memory buffer in another StoreRev new from the above. let mut another_store = StoreRevMut::new_from_other(&store); block_in_place(|| another_store.write(32, &another_hash)).unwrap(); - assert_eq!(another_store.id(), STATE_SPACE); + assert_eq!(another_store.id(), STATE_STORE_ID); // wal should have no records. 
assert!(block_in_place(|| disk_requester.collect_ash(1)) @@ -915,7 +915,7 @@ mod tests { assert_eq!(1, another_redo_delta.0.len()); assert_eq!(2, another_wal.undo.len()); - // Verify after the changes been applied to underlying CachedSpace, + // Verify after the changes been applied to underlying CachedStore, // the newly created stores should see the previous changes. state_cache.update(&redo_delta).unwrap(); let store = StoreRevMut::new(state_cache.clone()); @@ -960,11 +960,11 @@ mod tests { pages.sort_by_key(|p| p.0); let page_batch = Box::new([BufferWrite { - space_id: STATE_SPACE, + store_id: STATE_STORE_ID, delta: StoreDelta(pages), }]); - let write_batch = AshRecord([(STATE_SPACE, deltas.plain)].into()); + let write_batch = AshRecord([(STATE_STORE_ID, deltas.plain)].into()); (page_batch, write_batch) } } diff --git a/firewood/src/storage/mod.rs b/firewood/src/storage/mod.rs index a073dde85..a9eb6e3a9 100644 --- a/firewood/src/storage/mod.rs +++ b/firewood/src/storage/mod.rs @@ -4,7 +4,7 @@ // TODO: try to get rid of the use `RefCell` in this file use self::buffer::DiskBufferRequester; use crate::file::File; -use crate::shale::{self, LinearStore, LinearStoreView, SendSyncDerefMut, ShaleError, SpaceId}; +use crate::shale::{self, LinearStore, LinearStoreView, SendSyncDerefMut, ShaleError, StoreId}; use nix::fcntl::{Flock, FlockArg}; use parking_lot::RwLock; use serde::{Deserialize, Serialize}; @@ -52,14 +52,14 @@ impl From for StoreError { pub trait MemStoreR: Debug + Send + Sync { /// Returns a slice of bytes from memory. fn get_slice(&self, offset: u64, length: u64) -> Option>; - fn id(&self) -> SpaceId; + fn id(&self) -> StoreId; } // Page should be boxed as to not take up so much stack-space type Page = Box<[u8; PAGE_SIZE as usize]>; #[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] -pub struct SpaceWrite { +pub struct StoreWrite { offset: u64, data: Box<[u8]>, } @@ -68,19 +68,19 @@ pub struct SpaceWrite { /// In memory representation of Write-ahead log with `undo` and `redo`. pub struct Ash { /// Deltas to undo the changes. - pub undo: Vec, + pub undo: Vec, /// Deltas to replay the changes. - pub redo: Vec, + pub redo: Vec, } impl Ash { - fn iter(&self) -> impl Iterator { + fn iter(&self) -> impl Iterator { self.undo.iter().zip(self.redo.iter()) } } #[derive(Debug, serde::Serialize, serde::Deserialize)] -pub struct AshRecord(pub HashMap); +pub struct AshRecord(pub HashMap); impl growthring::wal::Record for AshRecord { fn serialize(&self) -> growthring::wal::WalBytes { @@ -97,7 +97,7 @@ impl AshRecord { } } -/// Basic copy-on-write item in the linear storage space for multi-versioning. +/// Basic copy-on-write item in the linear storage store for multi-versioning. 
+/// Basic copy-on-write item in the linear storage store for multi-versioning.
pub struct DeltaPage(u64, Page); impl DeltaPage { @@ -134,7 +134,7 @@ impl Deref for StoreDelta { } impl StoreDelta { - pub fn new(src: &dyn MemStoreR, writes: &[SpaceWrite]) -> Self { + pub fn new(src: &dyn MemStoreR, writes: &[StoreWrite]) -> Self { let mut deltas = Vec::new(); #[allow(clippy::indexing_slicing)] let mut widx: Vec<_> = (0..writes.len()) @@ -220,7 +220,7 @@ impl StoreDelta { } pub struct StoreRev { - base_space: RwLock>, + base_store: RwLock>, delta: StoreDelta, } @@ -236,7 +236,7 @@ impl fmt::Debug for StoreRev { impl MemStoreR for StoreRev { fn get_slice(&self, offset: u64, length: u64) -> Option> { - let base_space = self.base_space.read(); + let base_store = self.base_store.read(); let mut start = offset; let end = start + length; let delta = &self.delta; @@ -244,7 +244,7 @@ impl MemStoreR for StoreRev { let mut r = delta.len(); // no dirty page, before or after all dirty pages if r == 0 { - return base_space.get_slice(start, end - start); + return base_store.get_slice(start, end - start); } // otherwise, some dirty pages are covered by the range while r - l > 1 { @@ -262,7 +262,7 @@ impl MemStoreR for StoreRev { } #[allow(clippy::indexing_slicing)] if l >= delta.len() || end < delta[l].offset() { - return base_space.get_slice(start, end - start); + return base_store.get_slice(start, end - start); } let mut data = Vec::new(); #[allow(clippy::indexing_slicing)] @@ -270,7 +270,7 @@ impl MemStoreR for StoreRev { #[allow(clippy::indexing_slicing)] if start < delta[l].offset() { #[allow(clippy::indexing_slicing)] - data.extend(base_space.get_slice(start, delta[l].offset() - start)?); + data.extend(base_store.get_slice(start, delta[l].offset() - start)?); #[allow(clippy::indexing_slicing)] data.extend(&delta[l].data()[..p_off as usize]); } else { @@ -283,13 +283,13 @@ impl MemStoreR for StoreRev { l += 1; #[allow(clippy::indexing_slicing)] if l >= delta.len() || end < delta[l].offset() { - data.extend(base_space.get_slice(start, end - start)?); + data.extend(base_store.get_slice(start, end - start)?); break; } #[allow(clippy::indexing_slicing)] if delta[l].offset() > start { #[allow(clippy::indexing_slicing)] - data.extend(base_space.get_slice(start, delta[l].offset() - start)?); + data.extend(base_store.get_slice(start, delta[l].offset() - start)?); } #[allow(clippy::indexing_slicing)] if end < delta[l].offset() + PAGE_SIZE { @@ -306,8 +306,8 @@ impl MemStoreR for StoreRev { Some(data) } - fn id(&self) -> SpaceId { - self.base_space.read().id() + fn id(&self) -> StoreId { + self.base_store.read().id() } } @@ -315,19 +315,19 @@ impl MemStoreR for StoreRev { pub struct StoreRevShared(Arc); impl StoreRevShared { - pub fn from_ash(base_space: Arc, writes: &[SpaceWrite]) -> Self { - let delta = StoreDelta::new(base_space.as_ref(), writes); - let base_space = RwLock::new(base_space); - Self(Arc::new(StoreRev { base_space, delta })) + pub fn from_ash(base_store: Arc, writes: &[StoreWrite]) -> Self { + let delta = StoreDelta::new(base_store.as_ref(), writes); + let base_store = RwLock::new(base_store); + Self(Arc::new(StoreRev { base_store, delta })) } - pub fn from_delta(base_space: Arc, delta: StoreDelta) -> Self { - let base_space = RwLock::new(base_space); - Self(Arc::new(StoreRev { base_space, delta })) + pub fn from_delta(base_store: Arc, delta: StoreDelta) -> Self { + let base_store = RwLock::new(base_store); + Self(Arc::new(StoreRev { base_store, delta })) } - pub fn set_base_space(&mut self, base_space: Arc) { - *self.0.base_space.write() = base_space + pub fn 
@@ -355,7 +355,7 @@ impl LinearStore for StoreRevShared {
         Err(ShaleError::ImmutableWrite)
     }
 
-    fn id(&self) -> SpaceId {
+    fn id(&self) -> StoreId {
         <StoreRev as MemStoreR>::id(&self.0)
     }
 
@@ -375,7 +375,7 @@ impl From for StoreRevShared {
         let delta = StoreDelta(pages);
         let rev = Arc::new(StoreRev {
-            base_space: RwLock::new(value.base_space),
+            base_store: RwLock::new(value.base_store),
             delta,
         });
         StoreRevShared(rev)
     }
@@ -425,13 +425,13 @@ struct StoreRevMutDelta {
 
 #[derive(Clone, Debug)]
 /// A mutable revision of the store. The view is constructed by applying the `deltas` to the
-/// `base space`. The `deltas` tracks both `undo` and `redo` to be able to rewind or reapply
+/// `base_store`. The `deltas` tracks both `undo` and `redo` to be able to rewind or reapply
 /// the changes. `StoreRevMut` supports basing on top of another `StoreRevMut`, by chaining
 /// `prev_deltas` (from based `StoreRevMut`) with current `deltas` from itself . In this way,
 /// callers can create a new `StoreRevMut` from an existing one without actually committing
-/// the mutations to the base space.
+/// the mutations to the base store.
 pub struct StoreRevMut {
-    base_space: Arc<dyn MemStoreR>,
+    base_store: Arc<dyn MemStoreR>,
     deltas: Arc<RwLock<StoreRevMutDelta>>,
     prev_deltas: Arc<RwLock<StoreRevMutDelta>>,
 }
@@ -439,7 +439,7 @@ pub struct StoreRevMut {
 impl From<StoreRevShared> for StoreRevMut {
     fn from(value: StoreRevShared) -> Self {
         StoreRevMut {
-            base_space: value.0.base_space.read().clone(),
+            base_store: value.0.base_store.read().clone(),
             deltas: Arc::new(RwLock::new(StoreRevMutDelta::default())),
             prev_deltas: Arc::new(RwLock::new(StoreRevMutDelta::default())),
         }
@@ -447,9 +447,9 @@ impl From for StoreRevMut {
 }
 
 impl StoreRevMut {
-    pub fn new(base_space: Arc<dyn MemStoreR>) -> Self {
+    pub fn new(base_store: Arc<dyn MemStoreR>) -> Self {
         Self {
-            base_space,
+            base_store,
             deltas: Default::default(),
             prev_deltas: Default::default(),
         }
@@ -457,7 +457,7 @@ impl StoreRevMut {
 
     pub fn new_from_other(other: &StoreRevMut) -> Self {
         Self {
-            base_space: other.base_space.clone(),
+            base_store: other.base_store.clone(),
             deltas: Default::default(),
             prev_deltas: other.deltas.clone(),
         }
@@ -476,7 +476,7 @@ impl StoreRevMut {
             .or_insert_with(|| match prev_deltas.pages.get(&pid) {
                 Some(p) => Box::new(*p.as_ref()),
                 None => Box::new(
-                    self.base_space
+                    self.base_store
                         .get_slice(pid << PAGE_SIZE_NBIT, PAGE_SIZE)
                         .unwrap()
                         .try_into()
@@ -528,7 +528,7 @@ impl LinearStore for StoreRevMut {
                 None => match prev_deltas.get(&s_pid) {
                     #[allow(clippy::indexing_slicing)]
                     Some(p) => p[s_off..e_off + 1].to_vec(),
-                    None => self.base_space.get_slice(offset as u64, length)?,
+                    None => self.base_store.get_slice(offset as u64, length)?,
                 },
             }
         } else {
@@ -539,7 +539,7 @@ impl LinearStore for StoreRevMut {
                     #[allow(clippy::indexing_slicing)]
                     Some(p) => p[s_off..].to_vec(),
                     None => self
-                        .base_space
+                        .base_store
                         .get_slice(offset as u64, PAGE_SIZE - s_off as u64)?,
                 },
             };
@@ -549,7 +549,7 @@ impl LinearStore for StoreRevMut {
                 None => match prev_deltas.get(&p) {
                     Some(p) => data.extend(**p),
                     None => data.extend(
-                        &self.base_space.get_slice(p << PAGE_SIZE_NBIT, PAGE_SIZE)?,
+                        &self.base_store.get_slice(p << PAGE_SIZE_NBIT, PAGE_SIZE)?,
                     ),
                 },
             };
@@ -561,7 +561,7 @@ impl LinearStore for StoreRevMut {
                 #[allow(clippy::indexing_slicing)]
                 Some(p) => data.extend(&p[..e_off + 1]),
                 None => data.extend(
-                    self.base_space
+                    self.base_store
                         .get_slice(e_pid << PAGE_SIZE_NBIT, e_off as u64 + 1)?,
                 ),
             },
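The `StoreRevMut` doc comment above describes layering: a mutable revision applies its `deltas` over a `base_store`, and a second `StoreRevMut` can chain on the first through `prev_deltas` without committing anything. A hedged sketch of that chaining, assuming module-internal scope as before; only `new`, `new_from_other`, and `id` from the hunks above are used, and `ZeroStore` stands in for a real backing store:

    #[cfg(test)]
    mod chained_rev_sketch {
        use super::*;
        use std::sync::Arc;

        #[test]
        fn chained_revisions_report_the_base_store_id() {
            // An all-zero base store stands in for a real CachedStore.
            let base_store: Arc<dyn MemStoreR> = Arc::new(ZeroStore::default());

            // A mutable revision over the base store; its deltas stay in
            // memory and are not written back to `base_store`.
            let rev = StoreRevMut::new(base_store.clone());

            // A second revision chained on top of the first via `prev_deltas`,
            // so it sees `rev`'s pending pages without committing them.
            let chained = StoreRevMut::new_from_other(&rev);

            // Both revisions delegate `id()` to the same underlying store.
            assert_eq!(rev.id(), chained.id());
        }
    }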
@@ -629,11 +629,11 @@ impl LinearStore for StoreRevMut {
         let plain = &mut self.deltas.write().plain;
         assert!(undo.len() == redo.len());
 
-        plain.undo.push(SpaceWrite {
+        plain.undo.push(StoreWrite {
             offset: offset as u64,
             data: undo.into(),
         });
-        plain.redo.push(SpaceWrite {
+        plain.redo.push(StoreWrite {
             offset: offset as u64,
             data: redo,
         });
@@ -641,8 +641,8 @@ impl LinearStore for StoreRevMut {
         Ok(())
     }
 
-    fn id(&self) -> SpaceId {
-        self.base_space.id()
+    fn id(&self) -> StoreId {
+        self.base_store.id()
     }
 
     fn is_writeable(&self) -> bool {
@@ -659,8 +659,8 @@ impl MemStoreR for ZeroStore {
         Some(vec![0; length as usize])
     }
 
-    fn id(&self) -> SpaceId {
-        shale::INVALID_SPACE_ID
+    fn id(&self) -> StoreId {
+        shale::INVALID_STORE_ID
     }
 }
 
@@ -686,7 +686,7 @@ mod test {
                 canvas[(idx - min) as usize] = *byte;
             }
             println!("[0x{l:x}, 0x{r:x})");
-            writes.push(SpaceWrite { offset: l, data });
+            writes.push(StoreWrite { offset: l, data });
         }
         let z = Arc::new(ZeroStore::default());
         let rev = StoreRevShared::from_ash(z, &writes);
@@ -719,12 +719,12 @@ pub struct StoreConfig {
     ncached_files: usize,
     #[builder(default = 22)] // 4MB file by default
     file_nbit: u64,
-    space_id: SpaceId,
+    store_id: StoreId,
     rootdir: PathBuf,
 }
 
 #[derive(Debug)]
-struct CachedSpaceInner {
+struct CachedStoreInner {
     cached_pages: lru::LruCache<u64, Page>,
     pinned_pages: HashMap,
     files: Arc<FilePool>,
@@ -732,20 +732,20 @@ struct CachedSpaceInner {
 }
 
 #[derive(Clone, Debug)]
-pub struct CachedSpace {
-    inner: Arc<RwLock<CachedSpaceInner>>,
-    space_id: SpaceId,
+pub struct CachedStore {
+    inner: Arc<RwLock<CachedStoreInner>>,
+    store_id: StoreId,
 }
 
-impl CachedSpace {
+impl CachedStore {
     pub fn new(
         cfg: &StoreConfig,
         disk_requester: DiskBufferRequester,
     ) -> Result> {
-        let space_id = cfg.space_id;
+        let store_id = cfg.store_id;
         let files = Arc::new(FilePool::new(cfg)?);
         Ok(Self {
-            inner: Arc::new(RwLock::new(CachedSpaceInner {
+            inner: Arc::new(RwLock::new(CachedStoreInner {
                 cached_pages: lru::LruCache::new(
                     NonZeroUsize::new(cfg.ncached_pages).expect("non-zero cache size"),
                 ),
@@ -753,7 +753,7 @@ impl CachedSpace {
                 files,
                 disk_requester,
             })),
-            space_id,
+            store_id,
         })
     }
 
@@ -765,7 +765,7 @@ impl CachedSpace {
     pub fn update(&self, delta: &StoreDelta) -> Option<StoreDelta> {
         let mut pages = Vec::new();
         for DeltaPage(pid, page) in &delta.0 {
-            let data = self.inner.write().pin_page(self.space_id, *pid).ok()?;
+            let data = self.inner.write().pin_page(self.store_id, *pid).ok()?;
             // save the original data
             #[allow(clippy::unwrap_used)]
             pages.push(DeltaPage(*pid, Box::new(data.try_into().unwrap())));
@@ -776,10 +776,10 @@ impl CachedSpace {
     }
 }
 
-impl CachedSpaceInner {
+impl CachedStoreInner {
     fn pin_page(
         &mut self,
-        space_id: SpaceId,
+        store_id: StoreId,
         pid: u64,
     ) -> Result<&'static mut [u8], StoreError> {
         let base = match self.pinned_pages.get_mut(&pid) {
@@ -791,7 +791,7 @@ impl CachedSpaceInner {
             let page = self
                 .cached_pages
                 .pop(&pid)
-                .or_else(|| self.disk_requester.get_page(space_id, pid));
+                .or_else(|| self.disk_requester.get_page(store_id, pid));
             let mut page = match page {
                 Some(page) => page,
                 None => {
@@ -844,7 +844,7 @@ impl CachedSpaceInner {
 struct PageRef {
     pid: u64,
     data: &'static mut [u8],
-    store: CachedSpace,
+    store: CachedStore,
 }
 
 impl std::ops::Deref for PageRef {
@@ -861,10 +861,10 @@ impl std::ops::DerefMut for PageRef {
 }
 
 impl PageRef {
-    fn new(pid: u64, store: &CachedSpace) -> Option<Self> {
+    fn new(pid: u64, store: &CachedStore) -> Option<Self> {
         Some(Self {
             pid,
-            data: store.inner.write().pin_page(store.space_id, pid).ok()?,
+            data: store.inner.write().pin_page(store.store_id, pid).ok()?,
             store: store.clone(),
         })
     }
@@ -876,7 +876,7 @@ impl Drop for PageRef {
     }
 }
 
-impl MemStoreR for CachedSpace {
+impl MemStoreR for CachedStore {
     fn get_slice(&self, offset: u64, length: u64) -> Option<Vec<u8>> {
         if length == 0 {
             return Some(Default::default());
         }
@@ -903,8 +903,8 @@ impl MemStoreR for CachedSpace {
         Some(data)
     }
 
-    fn id(&self) -> SpaceId {
-        self.space_id
+    fn id(&self) -> StoreId {
+        self.store_id
     }
 }
 
diff --git a/fwdctl/src/create.rs b/fwdctl/src/create.rs
index a1984f7af..fff7afe68 100644
--- a/fwdctl/src/create.rs
+++ b/fwdctl/src/create.rs
@@ -51,7 +51,7 @@ pub struct Options {
         default_value_t = 262144,
         value_name = "PAYLOAD_NCACHED_PAGES",
         help = "Maximum cached pages for the item stash. This is the low-level cache used by the linear
-            space that holds trie nodes and account objects."
+            store that holds trie nodes and account objects."
     )]
     pub payload_ncached_pages: usize,
 
diff --git a/growth-ring/src/wal.rs b/growth-ring/src/wal.rs
index 0a128f78c..cc984b8b4 100644
--- a/growth-ring/src/wal.rs
+++ b/growth-ring/src/wal.rs
@@ -190,7 +190,7 @@ struct WalState {
 
 #[async_trait(?Send)]
 pub trait WalFile {
-    /// Initialize the file space in [offset, offset + length) to zero.
+    /// Initialize the file store in [offset, offset + length) to zero.
     async fn allocate(&self, offset: WalPos, length: usize) -> Result<(), WalError>;
     /// Write data with offset. We assume all previous `allocate`/`truncate` invocations are visible
     /// if ordered earlier (should be guaranteed by most OS). Additionally, the write caused
@@ -380,7 +380,7 @@ impl> WalFilePool {
             std::mem::replace(&mut *self.last_write.get(), std::mem::MaybeUninit::uninit())
                 .assume_init()
         };
-        // pre-allocate the file space
+        // pre-allocate the file store
         let alloc = async move {
             last_write.await?;
             let mut last_h: Option<