Skip to content

Commit

Permalink
Logging in firewood, initial version (#489)
Browse files Browse the repository at this point in the history
  • Loading branch information
rkuris authored Jan 15, 2024
1 parent 89e740d commit eb416aa
Show file tree
Hide file tree
Showing 7 changed files with 53 additions and 2 deletions.
6 changes: 6 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -143,6 +143,12 @@ There are several examples, in the examples directory, that simulate real world
use-cases. Try running them via the command-line, via `cargo run --release
--example simple`.

## Logging

If you want logging, enable the `logger` feature flag, and then set RUST_LOG accordingly.
See the documentation for [env_logger](https://docs.rs/env_logger/latest/env_logger/) for specifics.
We currently have very few logging statements, but this is useful for print-style debugging.

## Release

See the [release documentation](./RELEASE.md) for detailed information on how to release Firewood.
Expand Down
5 changes: 5 additions & 0 deletions firewood/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,11 @@ tokio = { version = "1.21.1", features = ["rt", "sync", "macros", "rt-multi-thre
typed-builder = "0.18.0"
bincode = "1.3.3"
bitflags = "2.4.1"
env_logger = { version = "0.10.1", optional = true }
log = { version = "0.4.20", optional = true }

[features]
logger = ["dep:env_logger", "log"]

[dev-dependencies]
criterion = {version = "0.5.1", features = ["async_tokio"]}
Expand Down
5 changes: 5 additions & 0 deletions firewood/src/db.rs
Original file line number Diff line number Diff line change
Expand Up @@ -465,6 +465,11 @@ impl Db {
let _ = tokio::fs::remove_dir_all(db_path.as_ref()).await;
}

#[cfg(feature = "logger")]
// initialize the logger, but ignore if this fails. This could fail because the calling
// library already initialized the logger or if you're opening a second database
let _ = env_logger::try_init();

block_in_place(|| Db::new_internal(db_path, cfg.clone()))
.map_err(|e| api::Error::InternalError(Box::new(e)))
}
Expand Down
1 change: 1 addition & 0 deletions firewood/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -195,4 +195,5 @@ pub mod nibbles;
// TODO: shale should not be pub, but there are integration test dependencies :(
pub mod shale;

pub mod logger;
pub mod v2;
26 changes: 26 additions & 0 deletions firewood/src/logger.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
// Copyright (C) 2023, Ava Labs, Inc. All rights reserved.
// See the file LICENSE.md for licensing terms.

// Supports making the logging operations a true runtime no-op.
// Since we're a library, we can't really use the logging level
// static shortcut; instead, callers always go through these re-exports
// and the no-op variants compile away entirely when the `logger`
// feature is disabled.

#[cfg(feature = "logger")]
pub use log::{debug, error, info, trace, warn};

#[cfg(not(feature = "logger"))]
pub use noop_logger::{debug, error, info, trace, warn};

#[cfg(not(feature = "logger"))]
mod noop_logger {
    /// No-op stand-in for the `log` crate macros: accepts any argument
    /// list the real macros accept (optional `target:`, a format string,
    /// any number of format arguments) and expands to nothing.
    ///
    /// NOTE: the pattern must swallow an arbitrary token stream. A
    /// pattern like `($(target: $a:expr,)? $b:tt)` only matches a single
    /// token tree after the optional target, so multi-argument calls such
    /// as `trace!("{} {}", a, b)` would fail to compile with the feature
    /// disabled — exactly the configuration this module exists for.
    #[macro_export]
    macro_rules! noop {
        ($($arg:tt)*) => {};
    }

    pub use noop as debug;
    pub use noop as error;
    pub use noop as info;
    pub use noop as trace;
    pub use noop as warn;
}
8 changes: 6 additions & 2 deletions firewood/src/merkle/node.rs
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
// Copyright (C) 2023, Ava Labs, Inc. All rights reserved.
// See the file LICENSE.md for licensing terms.

use crate::logger::trace;
use crate::{
merkle::from_nibbles,
shale::{disk_address::DiskAddress, CachedStore, ShaleError, ShaleStore, Storable},
Expand Down Expand Up @@ -360,15 +361,17 @@ mod type_id {
use type_id::NodeTypeId;

impl Storable for Node {
fn deserialize<T: CachedStore>(mut offset: usize, mem: &T) -> Result<Self, ShaleError> {
fn deserialize<T: CachedStore>(offset: usize, mem: &T) -> Result<Self, ShaleError> {
let meta_raw =
mem.get_view(offset, Meta::SIZE as u64)
.ok_or(ShaleError::InvalidCacheView {
offset,
size: Meta::SIZE as u64,
})?;

offset += Meta::SIZE;
trace!("[{mem:p}] Deserializing node at {offset}");

let offset = offset + Meta::SIZE;

#[allow(clippy::indexing_slicing)]
let attrs = NodeAttributes::from_bits_retain(meta_raw.as_deref()[TRIE_HASH_LEN]);
Expand Down Expand Up @@ -458,6 +461,7 @@ impl Storable for Node {
}

fn serialize(&self, to: &mut [u8]) -> Result<(), ShaleError> {
trace!("[{self:p}] Serializing node");
let mut cur = Cursor::new(to);

let mut attrs = match self.root_hash.get() {
Expand Down
4 changes: 4 additions & 0 deletions firewood/src/shale/compact.rs
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,8 @@ use std::io::{Cursor, Write};
use std::num::NonZeroUsize;
use std::sync::RwLock;

use crate::logger::trace;

#[derive(Debug)]
pub struct CompactHeader {
payload_size: u64,
Expand Down Expand Up @@ -584,6 +586,8 @@ impl<T: Storable + Debug + 'static + PartialEq, M: CachedStore + Send + Sync> Sh
#[allow(clippy::unwrap_used)]
let addr = self.inner.write().unwrap().alloc(size)?;

trace!("{self:p} put_item at {addr} size {size}");

#[allow(clippy::unwrap_used)]
let obj = {
let inner = self.inner.read().unwrap();
Expand Down

0 comments on commit eb416aa

Please sign in to comment.