chore: remove unused blob in db (#245)
xinifinity authored Sep 5, 2023
1 parent 5fcdaee commit 9880cdf
Showing 4 changed files with 10 additions and 161 deletions.
3 changes: 0 additions & 3 deletions firewood/src/config.rs
@@ -63,7 +63,4 @@ pub struct DbRevConfig {
    /// Maximum cached Trie objects.
    #[builder(default = 1 << 20)]
    pub merkle_ncached_objs: usize,
-    /// Maximum cached Blob (currently just `Account`) objects.
-    #[builder(default = 4096)]
-    pub blob_ncached_objs: usize,
}
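
With `blob_ncached_objs` gone, `DbRevConfig` is down to a single cache knob. A sketch of the resulting struct, reconstructed from the hunk above (the `typed_builder` import and derive are assumptions; only the `#[builder]` attribute is visible in the diff):

use typed_builder::TypedBuilder; // assumed: the #[builder(default = ...)] syntax matches typed-builder

/// Per-revision DB configuration once the blob cache option is removed.
#[derive(TypedBuilder, Debug)]
pub struct DbRevConfig {
    /// Maximum cached Trie objects.
    #[builder(default = 1 << 20)]
    pub merkle_ncached_objs: usize,
}

Under these assumptions, `DbRevConfig::builder().build()` yields the `1 << 20` default, and `.merkle_ncached_objs(n)` overrides it.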
107 changes: 6 additions & 101 deletions firewood/src/db.rs
@@ -45,9 +45,7 @@ use self::proposal::ProposalBase;

const MERKLE_META_SPACE: SpaceId = 0x0;
const MERKLE_PAYLOAD_SPACE: SpaceId = 0x1;
-const BLOB_META_SPACE: SpaceId = 0x2;
-const BLOB_PAYLOAD_SPACE: SpaceId = 0x3;
-const ROOT_HASH_SPACE: SpaceId = 0x4;
+const ROOT_HASH_SPACE: SpaceId = 0x2;
const SPACE_RESERVED: u64 = 0x1000;

const MAGIC_STR: &[u8; 16] = b"firewood v0.1\0\0\0";
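
Dropping the two blob spaces frees IDs 0x2 and 0x3, and ROOT_HASH_SPACE is renumbered from 0x4 to 0x2, presumably to keep the space IDs contiguous. The constants as they read after this hunk:

// Linear-space IDs after the blob meta/payload spaces are dropped.
const MERKLE_META_SPACE: SpaceId = 0x0;
const MERKLE_PAYLOAD_SPACE: SpaceId = 0x1;
const ROOT_HASH_SPACE: SpaceId = 0x2; // was 0x4
const SPACE_RESERVED: u64 = 0x1000;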
@@ -233,14 +231,12 @@ impl Storable for DbHeader {
/// Necessary linear space instances bundled for the state of the entire DB.
struct Universe<T> {
    merkle: SubUniverse<T>,
-    blob: SubUniverse<T>,
}

impl Universe<StoreRevShared> {
    fn to_mem_store_r(&self) -> Universe<Arc<impl MemStoreR>> {
        Universe {
            merkle: self.merkle.to_mem_store_r(),
-            blob: self.blob.to_mem_store_r(),
        }
    }
}
@@ -249,7 +245,6 @@ impl Universe<Arc<StoreRevMut>> {
    fn new_from_other(&self) -> Universe<Arc<StoreRevMut>> {
        Universe {
            merkle: self.merkle.new_from_other(),
-            blob: self.blob.new_from_other(),
        }
    }
}
@@ -258,7 +253,6 @@ impl Universe<Arc<CachedSpace>> {
    fn to_mem_store_r(&self) -> Universe<Arc<impl MemStoreR>> {
        Universe {
            merkle: self.merkle.to_mem_store_r(),
-            blob: self.blob.to_mem_store_r(),
        }
    }
}
@@ -268,14 +262,11 @@ impl<T: MemStoreR + 'static> Universe<Arc<T>> {
        &self,
        merkle_meta_writes: &[SpaceWrite],
        merkle_payload_writes: &[SpaceWrite],
-        blob_meta_writes: &[SpaceWrite],
-        blob_payload_writes: &[SpaceWrite],
    ) -> Universe<StoreRevShared> {
        Universe {
            merkle: self
                .merkle
                .rewind(merkle_meta_writes, merkle_payload_writes),
-            blob: self.blob.rewind(blob_meta_writes, blob_payload_writes),
        }
    }
}
@@ -338,14 +329,6 @@ impl<S: ShaleStore<Node> + Send + Sync> DbRev<S> {
        let valid = proof.verify_range_proof(hash, first_key, last_key, keys, values)?;
        Ok(valid)
    }
-
-    /// Check if the account exists.
-    pub fn exist<K: AsRef<[u8]>>(&self, key: K) -> Result<bool, DbError> {
-        Ok(match self.merkle.get(key, self.header.acc_root) {
-            Ok(r) => r.is_some(),
-            Err(e) => return Err(DbError::Merkle(e)),
-        })
-    }
}

#[derive(Debug)]
@@ -407,10 +390,6 @@ impl Db {
        let merkle_meta_path = file::touch_dir("meta", &merkle_path)?;
        let merkle_payload_path = file::touch_dir("compact", &merkle_path)?;

-        let blob_path = file::touch_dir("blob", &db_path)?;
-        let blob_meta_path = file::touch_dir("meta", &blob_path)?;
-        let blob_payload_path = file::touch_dir("compact", &blob_path)?;
-
        let root_hash_path = file::touch_dir("root_hash", &db_path)?;

        let file0 = crate::file::File::new(0, SPACE_RESERVED, &merkle_meta_path)?;
@@ -509,41 +488,11 @@ impl Db {
                    .unwrap(),
                ),
            ),
-            blob: SubUniverse::new(
-                Arc::new(
-                    CachedSpace::new(
-                        &StoreConfig::builder()
-                            .ncached_pages(cfg.meta_ncached_pages)
-                            .ncached_files(cfg.meta_ncached_files)
-                            .space_id(BLOB_META_SPACE)
-                            .file_nbit(params.meta_file_nbit)
-                            .rootdir(blob_meta_path)
-                            .build(),
-                        disk_requester.clone(),
-                    )
-                    .unwrap(),
-                ),
-                Arc::new(
-                    CachedSpace::new(
-                        &StoreConfig::builder()
-                            .ncached_pages(cfg.payload_ncached_pages)
-                            .ncached_files(cfg.payload_ncached_files)
-                            .space_id(BLOB_PAYLOAD_SPACE)
-                            .file_nbit(params.payload_file_nbit)
-                            .rootdir(blob_payload_path)
-                            .build(),
-                        disk_requester.clone(),
-                    )
-                    .unwrap(),
-                ),
-            ),
        };

        [
            data_cache.merkle.meta.as_ref(),
            data_cache.merkle.payload.as_ref(),
-            data_cache.blob.meta.as_ref(),
-            data_cache.blob.payload.as_ref(),
            root_hash_cache.as_ref(),
        ]
        .into_iter()
@@ -559,26 +508,18 @@ impl Db {

        let base = Universe {
            merkle: get_sub_universe_from_empty_delta(&data_cache.merkle),
-            blob: get_sub_universe_from_empty_delta(&data_cache.blob),
        };

        let db_header_ref = Db::get_db_header_ref(&base.merkle.meta)?;

        let merkle_payload_header_ref =
            Db::get_payload_header_ref(&base.merkle.meta, Db::PARAM_SIZE + DbHeader::MSIZE)?;

-        let blob_payload_header_ref = Db::get_payload_header_ref(&base.blob.meta, DbHeader::MSIZE)?;
-
-        let header_refs = (
-            db_header_ref,
-            merkle_payload_header_ref,
-            blob_payload_header_ref,
-        );
+        let header_refs = (db_header_ref, merkle_payload_header_ref);

        let base_revision = Db::new_revision(
            header_refs,
            (base.merkle.meta.clone(), base.merkle.payload.clone()),
-            (base.blob.meta.clone(), base.blob.payload.clone()),
            params.payload_regn_nbit,
            cfg.payload_max_walk,
            &cfg.rev,
@@ -618,10 +559,8 @@ impl Db {
        let merkle_payload_header: DiskAddress = DiskAddress::from(offset);
        offset += CompactSpaceHeader::MSIZE as usize;
        assert!(offset <= SPACE_RESERVED as usize);
-        let blob_payload_header: DiskAddress = DiskAddress::null();

        let mut merkle_meta_store = StoreRevMut::new(cached_space.merkle.meta.clone());
-        let mut blob_meta_store = StoreRevMut::new(cached_space.blob.meta.clone());

        if reset_store_headers {
            // initialize store headers
@@ -636,24 +575,13 @@ impl Db {
                db_header.into(),
                &shale::to_dehydrated(&DbHeader::new_empty())?,
            );
-            blob_meta_store.write(
-                blob_payload_header.into(),
-                &shale::to_dehydrated(&shale::compact::CompactSpaceHeader::new(
-                    NonZeroUsize::new(SPACE_RESERVED as usize).unwrap(),
-                    NonZeroUsize::new(SPACE_RESERVED as usize).unwrap(),
-                ))?,
-            );
        }

        let store = Universe {
            merkle: SubUniverse::new(
                Arc::new(merkle_meta_store),
                Arc::new(StoreRevMut::new(cached_space.merkle.payload.clone())),
            ),
-            blob: SubUniverse::new(
-                Arc::new(blob_meta_store),
-                Arc::new(StoreRevMut::new(cached_space.blob.payload.clone())),
-            ),
        };

        let db_header_ref = Db::get_db_header_ref(store.merkle.meta.as_ref())?;
@@ -663,18 +591,11 @@ impl Db {
            Db::PARAM_SIZE + DbHeader::MSIZE,
        )?;

-        let blob_payload_header_ref = Db::get_payload_header_ref(store.blob.meta.as_ref(), 0)?;
-
-        let header_refs = (
-            db_header_ref,
-            merkle_payload_header_ref,
-            blob_payload_header_ref,
-        );
+        let header_refs = (db_header_ref, merkle_payload_header_ref);

        let mut rev: DbRev<CompactSpace<Node, StoreRevMut>> = Db::new_revision(
            header_refs,
            (store.merkle.meta.clone(), store.merkle.payload.clone()),
-            (store.blob.meta.clone(), store.blob.payload.clone()),
            payload_regn_nbit,
            cfg.payload_max_walk,
            &cfg.rev,
@@ -703,13 +624,8 @@ impl Db {
    }

    fn new_revision<K: CachedStore, T: Into<Arc<K>>>(
-        header_refs: (
-            Obj<DbHeader>,
-            Obj<CompactSpaceHeader>,
-            Obj<CompactSpaceHeader>,
-        ),
+        header_refs: (Obj<DbHeader>, Obj<CompactSpaceHeader>),
        merkle: (T, T),
-        _blob: (T, T),
        payload_regn_nbit: u64,
        payload_max_walk: u64,
        cfg: &DbRevConfig,
@@ -737,7 +653,7 @@

        let merkle = Merkle::new(Box::new(merkle_space));

-        if db_header_ref.acc_root.is_null() {
+        if db_header_ref.kv_root.is_null() {
            let mut err = Ok(());
            // create the sentinel node
            db_header_ref
@@ -863,14 +779,10 @@ impl Db {
            Some(u) => u.to_mem_store_r().rewind(
                &ash.0[&MERKLE_META_SPACE].undo,
                &ash.0[&MERKLE_PAYLOAD_SPACE].undo,
-                &ash.0[&BLOB_META_SPACE].undo,
-                &ash.0[&BLOB_PAYLOAD_SPACE].undo,
            ),
            None => inner_lock.cached_space.to_mem_store_r().rewind(
                &ash.0[&MERKLE_META_SPACE].undo,
                &ash.0[&MERKLE_PAYLOAD_SPACE].undo,
-                &ash.0[&BLOB_META_SPACE].undo,
-                &ash.0[&BLOB_PAYLOAD_SPACE].undo,
            ),
        };
        revisions.inner.push_back(u);
@@ -891,19 +803,12 @@ impl Db {
            Db::get_payload_header_ref(&space.merkle.meta, Db::PARAM_SIZE + DbHeader::MSIZE)
                .unwrap();

-        let blob_payload_header_ref = Db::get_payload_header_ref(&space.blob.meta, 0).unwrap();
-
-        let header_refs = (
-            db_header_ref,
-            merkle_payload_header_ref,
-            blob_payload_header_ref,
-        );
+        let header_refs = (db_header_ref, merkle_payload_header_ref);

        Revision {
            rev: Db::new_revision(
                header_refs,
                (space.merkle.meta.clone(), space.merkle.payload.clone()),
-                (space.blob.meta.clone(), space.blob.payload.clone()),
                self.payload_regn_nbit,
                0,
                &self.cfg.rev,
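
Taken together, the db.rs hunks collapse the store to the merkle sub-universe alone: `Universe` loses its `blob` field, the header tuple shrinks from three `Obj` references to two, and `new_revision` drops the unused `_blob` store pair. A sketch of the resulting shapes, reconstructed from the hunks above (the return type is an assumption inferred from the `DbRev<CompactSpace<Node, StoreRevMut>>` annotation at the call site; the body is elided):

/// Necessary linear space instances bundled for the state of the entire DB.
struct Universe<T> {
    merkle: SubUniverse<T>,
}

fn new_revision<K: CachedStore, T: Into<Arc<K>>>(
    header_refs: (Obj<DbHeader>, Obj<CompactSpaceHeader>),
    merkle: (T, T),
    payload_regn_nbit: u64,
    payload_max_walk: u64,
    cfg: &DbRevConfig,
) -> Result<DbRev<CompactSpace<Node, StoreRevMut>>, DbError> {
    todo!() // body as in the diff: build the CompactSpace, wrap it in Merkle, seed kv_root if null
}

Every call site now passes the two-element header tuple and a single (meta, payload) store pair.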
