
Commit

fixup! Remove new_revision_arc method
richardpringle committed Aug 10, 2023
1 parent 43efa29 commit 4a4b44a
Showing 1 changed file with 150 additions and 76 deletions.
226 changes: 150 additions & 76 deletions firewood/src/db.rs
@@ -21,10 +21,11 @@ use parking_lot::{Mutex, RwLock};
 #[cfg(feature = "eth")]
 use primitive_types::U256;
 use shale::compact::CompactSpace;
-use shale::ShaleStore;
 use shale::{
     compact::CompactSpaceHeader, CachedStore, ObjPtr, ShaleError, SpaceId, Storable, StoredView,
 };
+use shale::{Obj, ShaleStore};
+use std::mem::size_of;
 use std::{
     collections::VecDeque,
     error::Error,
@@ -466,6 +467,8 @@ pub struct Db<S, T> {
 
 // #[metered(registry = DbMetrics, visibility = pub)]
 impl Db<Store, SharedStore> {
+    const PARAM_SIZE: u64 = size_of::<DbParams>() as u64;
+
     /// Open a database.
     pub fn new<P: AsRef<Path>>(db_path: P, cfg: &DbConfig) -> Result<Self, DbError> {
         // TODO: make sure all fds are released at the end
@@ -516,7 +519,7 @@ impl Db<Store, SharedStore> {
             let header_bytes = unsafe {
                 std::slice::from_raw_parts(
                     &header as *const DbParams as *const u8,
-                    std::mem::size_of::<DbParams>(),
+                    size_of::<DbParams>(),
                 )
                 .to_vec()
             };
@@ -525,7 +528,7 @@
         }
 
         // read DbParams
-        let mut header_bytes = [0; std::mem::size_of::<DbParams>()];
+        let mut header_bytes = [0; size_of::<DbParams>()];
         nix::sys::uio::pread(fd0, &mut header_bytes, 0).map_err(DbError::System)?;
         drop(file0);
         let params: DbParams = cast_slice(&header_bytes)[0];
@@ -641,13 +644,24 @@ impl Db<Store, SharedStore> {
             merkle: get_sub_universe_from_empty_delta(&data_cache.merkle),
             blob: get_sub_universe_from_empty_delta(&data_cache.blob),
         };
+
+        let db_header_ref = Db::get_db_header_ref(&base.merkle.meta)?;
+
+        let merkle_payload_header_ref =
+            Db::get_payload_header_ref(&base.merkle.meta, Db::PARAM_SIZE + DbHeader::MSIZE)?;
+
+        let blob_payload_header_ref = Db::get_payload_header_ref(&base.blob.meta, 0)?;
+
+        let header_refs = (
+            db_header_ref,
+            merkle_payload_header_ref,
+            blob_payload_header_ref,
+        );
+
         let base_revision = Db::new_revision(
-            &base.merkle.meta,
-            &base.blob.meta,
-            base.merkle.meta.clone(),
-            base.merkle.payload.clone(),
-            base.blob.meta.clone(),
-            base.blob.payload.clone(),
+            header_refs,
+            (base.merkle.meta.clone(), base.merkle.payload.clone()),
+            (base.blob.meta.clone(), base.blob.payload.clone()),
             params.payload_regn_nbit,
             cfg.payload_max_walk,
             &cfg.rev,
@@ -683,7 +697,7 @@ impl Db<Store, SharedStore> {
         payload_regn_nbit: u64,
         cfg: &DbConfig,
     ) -> Result<(Universe<Arc<StoreRevMut>>, DbRev<Store>), DbError> {
-        let mut offset = std::mem::size_of::<DbParams>() as u64;
+        let mut offset = Db::PARAM_SIZE;
         let db_header: ObjPtr<DbHeader> = ObjPtr::new_from_addr(offset);
         offset += DbHeader::MSIZE;
         let merkle_payload_header: ObjPtr<CompactSpaceHeader> = ObjPtr::new_from_addr(offset);
@@ -727,13 +741,25 @@ impl Db<Store, SharedStore> {
             ),
         };
 
-        let mut rev: DbRev<CompactSpace<Node, StoreRevMut>> = Db::new_revision(
+        let db_header_ref = Db::get_db_header_ref(store.merkle.meta.as_ref())?;
+
+        let merkle_payload_header_ref = Db::get_payload_header_ref(
             store.merkle.meta.as_ref(),
-            store.blob.meta.as_ref(),
-            store.merkle.meta.clone(),
-            store.merkle.payload.clone(),
-            store.blob.meta.clone(),
-            store.blob.payload.clone(),
+            Db::PARAM_SIZE + DbHeader::MSIZE,
+        )?;
+
+        let blob_payload_header_ref = Db::get_payload_header_ref(store.blob.meta.as_ref(), 0)?;
+
+        let header_refs = (
+            db_header_ref,
+            merkle_payload_header_ref,
+            blob_payload_header_ref,
+        );
+
+        let mut rev: DbRev<CompactSpace<Node, StoreRevMut>> = Db::new_revision(
+            header_refs,
+            (store.merkle.meta.clone(), store.merkle.payload.clone()),
+            (store.blob.meta.clone(), store.blob.payload.clone()),
             payload_regn_nbit,
             cfg.payload_max_walk,
             &cfg.rev,
@@ -743,46 +769,50 @@ impl Db<Store, SharedStore> {
         Ok((store, rev))
     }
 
+    fn get_payload_header_ref<K: CachedStore>(
+        meta_ref: &K,
+        header_offset: u64,
+    ) -> Result<Obj<CompactSpaceHeader>, DbError> {
+        let payload_header = ObjPtr::<CompactSpaceHeader>::new_from_addr(header_offset);
+        StoredView::ptr_to_obj(
+            meta_ref,
+            payload_header,
+            shale::compact::CompactHeader::MSIZE,
+        )
+        .map_err(Into::into)
+    }
+
+    fn get_db_header_ref<K: CachedStore>(meta_ref: &K) -> Result<Obj<DbHeader>, DbError> {
+        let db_header = ObjPtr::<DbHeader>::new_from_addr(Db::PARAM_SIZE);
+        StoredView::ptr_to_obj(meta_ref, db_header, DbHeader::MSIZE).map_err(Into::into)
+    }
+
     fn new_revision<K: CachedStore>(
-        merkle_meta_ref: &K,
-        blob_meta_ref: &K,
-        merkle_meta: impl Into<Arc<K>>,
-        merkle_payload: impl Into<Arc<K>>,
-        blob_meta: impl Into<Arc<K>>,
-        blob_payload: impl Into<Arc<K>>,
+        header_refs: (
+            Obj<DbHeader>,
+            Obj<CompactSpaceHeader>,
+            Obj<CompactSpaceHeader>,
+        ),
+        merkle: (impl Into<Arc<K>>, impl Into<Arc<K>>),
+        _blob: (impl Into<Arc<K>>, impl Into<Arc<K>>),
         payload_regn_nbit: u64,
         payload_max_walk: u64,
         cfg: &DbRevConfig,
     ) -> Result<DbRev<CompactSpace<Node, K>>, DbError> {
-        // Set up the storage layout
-        let mut offset = std::mem::size_of::<DbParams>() as u64;
-        // DbHeader starts after DbParams in merkle meta space
-        let db_header: ObjPtr<DbHeader> = ObjPtr::new_from_addr(offset);
-        offset += DbHeader::MSIZE;
-        // Merkle CompactHeader starts after DbHeader in merkle meta space
-        let merkle_payload_header: ObjPtr<CompactSpaceHeader> = ObjPtr::new_from_addr(offset);
-        offset += CompactSpaceHeader::MSIZE;
-        assert!(offset <= SPACE_RESERVED);
+        // TODO: This should be a compile time check
+        const DB_OFFSET: u64 = Db::PARAM_SIZE;
+        let merkle_offset = DB_OFFSET + DbHeader::MSIZE;
+        assert!(merkle_offset + CompactSpaceHeader::MSIZE <= SPACE_RESERVED);
 
-        let mut db_header_ref =
-            StoredView::ptr_to_obj(merkle_meta_ref, db_header, DbHeader::MSIZE).unwrap();
+        let mut db_header_ref = header_refs.0;
+        let merkle_payload_header_ref = header_refs.1;
 
-        let merkle_payload_header_ref = StoredView::ptr_to_obj(
-            merkle_meta_ref,
-            merkle_payload_header,
-            shale::compact::CompactHeader::MSIZE,
-        )?;
-
-        #[cfg(feature = "eth")]
-        let blob_payload_header_ref = StoredView::ptr_to_obj(
-            blob_meta_ref,
-            blob_payload_header,
-            shale::compact::CompactHeader::MSIZE,
-        )?;
+        let merkle_meta = merkle.0.into();
+        let merkle_payload = merkle.1.into();
 
         let merkle_space = shale::compact::CompactSpace::new(
-            merkle_meta.into(),
-            merkle_payload.into(),
+            merkle_meta,
+            merkle_payload,
             merkle_payload_header_ref,
             shale::ObjCache::new(cfg.merkle_ncached_objs),
             payload_max_walk,
@@ -791,15 +821,22 @@ impl Db<Store, SharedStore> {
         .unwrap();
 
         #[cfg(feature = "eth")]
-        let blob_space = shale::compact::CompactSpace::new(
-            blob_meta.into(),
-            blob_payload.into(),
-            blob_payload_header_ref,
-            shale::ObjCache::new(cfg.blob_ncached_objs),
-            payload_max_walk,
-            payload_regn_nbit,
-        )
-        .unwrap();
+        let blob_space = {
+            let blob = _blob;
+            let blob_payload_header_ref = header_refs.2;
+            let blob_meta = blob.0.into();
+            let blob_payload = blob.1.into();
+
+            shale::compact::CompactSpace::new(
+                blob_meta,
+                blob_payload,
+                header_refs.2,
+                shale::ObjCache::new(cfg.blob_ncached_objs),
+                payload_max_walk,
+                payload_regn_nbit,
+            )
+            .unwrap()
+        };
 
         if db_header_ref.acc_root.is_null() {
             let mut err = Ok(());
@@ -958,20 +995,33 @@ impl Db<Store, SharedStore> {
         drop(inner_lock);
 
         let cfg = cfg.as_ref().unwrap_or(&self.rev_cfg);
-        Some(Revision {
+
+        let db_header_ref = Db::get_db_header_ref(&space.merkle.meta).unwrap();
+
+        let merkle_payload_header_ref =
+            Db::get_payload_header_ref(&space.merkle.meta, Db::PARAM_SIZE + DbHeader::MSIZE)
+                .unwrap();
+
+        let blob_payload_header_ref = Db::get_payload_header_ref(&space.blob.meta, 0).unwrap();
+
+        let header_refs = (
+            db_header_ref,
+            merkle_payload_header_ref,
+            blob_payload_header_ref,
+        );
+
+        Revision {
             rev: Db::new_revision(
-                &space.merkle.meta,
-                &space.blob.meta,
-                space.merkle.meta.clone(),
-                space.merkle.payload.clone(),
-                space.blob.meta.clone(),
-                space.blob.payload.clone(),
+                header_refs,
+                (space.merkle.meta.clone(), space.merkle.payload.clone()),
+                (space.blob.meta.clone(), space.blob.payload.clone()),
                 self.payload_regn_nbit,
                 0,
                 cfg,
             )
             .unwrap(),
-        })
+        }
+        .into()
     }
 }
 
Expand Down Expand Up @@ -1112,13 +1162,26 @@ impl Proposal<Store, SharedStore> {
let m = Arc::clone(&self.m);
let r = Arc::clone(&self.r);
let cfg = self.cfg.clone();
let mut rev = Db::new_revision(

let db_header_ref = Db::get_db_header_ref(store.merkle.meta.as_ref())?;

let merkle_payload_header_ref = Db::get_payload_header_ref(
store.merkle.meta.as_ref(),
store.blob.meta.as_ref(),
store.merkle.meta.clone(),
store.merkle.payload.clone(),
store.blob.meta.clone(),
store.blob.payload.clone(),
Db::PARAM_SIZE + DbHeader::MSIZE,
)?;

let blob_payload_header_ref = Db::get_payload_header_ref(store.blob.meta.as_ref(), 0)?;

let header_refs = (
db_header_ref,
merkle_payload_header_ref,
blob_payload_header_ref,
);

let mut rev = Db::new_revision(
header_refs,
(store.merkle.meta.clone(), store.merkle.payload.clone()),
(store.blob.meta.clone(), store.blob.payload.clone()),
cfg.payload_regn_nbit,
cfg.payload_max_walk,
&cfg.rev,
@@ -1274,13 +1337,24 @@ impl Proposal<Store, SharedStore> {
             merkle: get_sub_universe_from_empty_delta(&rev_inner.data_cache.merkle),
             blob: get_sub_universe_from_empty_delta(&rev_inner.data_cache.blob),
         };
+
+        let db_header_ref = Db::get_db_header_ref(&base.merkle.meta)?;
+
+        let merkle_payload_header_ref =
+            Db::get_payload_header_ref(&base.merkle.meta, Db::PARAM_SIZE + DbHeader::MSIZE)?;
+
+        let blob_payload_header_ref = Db::get_payload_header_ref(&base.blob.meta, 0)?;
+
+        let header_refs = (
+            db_header_ref,
+            merkle_payload_header_ref,
+            blob_payload_header_ref,
+        );
+
         let base_revision = Db::new_revision(
-            &base.merkle.meta,
-            &base.blob.meta,
-            base.merkle.meta.clone(),
-            base.merkle.payload.clone(),
-            base.blob.meta.clone(),
-            base.blob.payload.clone(),
+            header_refs,
+            (base.merkle.meta.clone(), base.merkle.payload.clone()),
+            (base.blob.meta.clone(), base.blob.payload.clone()),
             0,
             self.cfg.payload_max_walk,
             &self.cfg.rev,

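Note on the refactor: the new new_revision no longer computes header offsets itself. Each caller resolves the DbHeader and CompactSpaceHeader objects up front via get_db_header_ref / get_payload_header_ref and passes them in as a header_refs tuple, using Db::PARAM_SIZE + DbHeader::MSIZE for the merkle payload header and offset 0 for the blob payload header. The sketch below shows only the offset arithmetic those helpers rely on, as a standalone program; the struct sizes and SPACE_RESERVED value are made-up placeholders, not firewood's real constants.

    use std::mem::size_of;

    // Stand-ins for firewood's types and constants; the sizes below are
    // hypothetical placeholders, not the real values from firewood/src/db.rs.
    type FakeDbParams = [u8; 48]; // pretend DbParams is 48 bytes
    const DB_HEADER_MSIZE: u64 = 32; // pretend DbHeader::MSIZE
    const COMPACT_HEADER_MSIZE: u64 = 80; // pretend CompactSpaceHeader::MSIZE
    const SPACE_RESERVED: u64 = 0x1000; // pretend reserved meta-space size

    fn main() {
        // Mirrors `const PARAM_SIZE: u64 = size_of::<DbParams>() as u64;`
        let param_size = size_of::<FakeDbParams>() as u64;

        // DbHeader sits immediately after DbParams in the merkle meta space,
        // and the merkle CompactSpaceHeader immediately after DbHeader
        // (the blob payload header lives at offset 0 of the blob meta space).
        let db_header_offset = param_size;
        let merkle_payload_header_offset = db_header_offset + DB_HEADER_MSIZE;

        // The same bounds check the refactored new_revision performs.
        assert!(merkle_payload_header_offset + COMPACT_HEADER_MSIZE <= SPACE_RESERVED);

        println!(
            "DbHeader at {db_header_offset}, merkle payload header at {merkle_payload_header_offset}"
        );
    }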