Skip to content

Commit

Permalink
Merge pull request #26 from cchudant/rayon
Browse files Browse the repository at this point in the history
L2 commitments: hash commitments & l2 sync parallelism
  • Loading branch information
antiyro authored Mar 26, 2024
2 parents 96c12f5 + 914647c commit ff8dc52
Show file tree
Hide file tree
Showing 11 changed files with 182 additions and 130 deletions.
1 change: 1 addition & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@ # Deoxys Changelog

## Next release

- perf(l2 sync): parallelize commitment computation and refactor part of l2 io sync
- refactor: rpc methods and removed rpc-core
- feat: add an optional TUI dashboard
- feat(bonsai): Bumped bonsai lib to latest opti
Expand Down
7 changes: 4 additions & 3 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

1 change: 1 addition & 0 deletions crates/client/sync/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -65,6 +65,7 @@ mp-felt = { workspace = true }
mp-hashers = { workspace = true }
mp-storage = { workspace = true, default-features = true }
mp-transactions = { workspace = true, features = ["client"] }
rayon = "1.10.0"
starknet-types-core = { workspace = true, default-features = false, features = [
"hash",
"parity-scale-codec",
Expand Down
19 changes: 9 additions & 10 deletions crates/client/sync/src/commitments/contracts.rs
Original file line number Diff line number Diff line change
@@ -1,5 +1,3 @@
use std::sync::{Arc, Mutex};

use bitvec::order::Msb0;
use bitvec::vec::BitVec;
use bitvec::view::BitView;
Expand Down Expand Up @@ -42,20 +40,21 @@ pub struct ContractLeafParams {
pub fn update_storage_trie(
contract_address: &ContractAddress,
storage_updates: &IndexMap<StorageKey, StarkFelt>,
bonsai_contract_storage: &Arc<Mutex<BonsaiStorage<BasicId, BonsaiDb, Pedersen>>>,
bonsai_contract_storage: &mut BonsaiStorage<BasicId, BonsaiDb, Pedersen>,
) {
let mut bonsai_storage = bonsai_contract_storage.lock().unwrap();
let identifier = identifier(contract_address);
bonsai_storage.init_tree(identifier).expect("Failed to init tree");
bonsai_contract_storage.init_tree(identifier).expect("Failed to init tree");

// Insert new storage changes
storage_updates.into_iter().map(|(key, value)| convert_storage((*key, *value))).for_each(|(key, value)| {
bonsai_storage.insert(identifier, &key, &value.into()).expect("Failed to insert storage update into trie");
});
for (key, value) in storage_updates {
let (key, value) = convert_storage(*key, *value);
bonsai_contract_storage
.insert(identifier, &key, &value.into())
.expect("Failed to insert storage update into trie");
}
}

fn convert_storage(storage: (StorageKey, StarkFelt)) -> (BitVec<u8, Msb0>, Felt252Wrapper) {
let (storage_key, storage_value) = storage;
fn convert_storage(storage_key: StorageKey, storage_value: StarkFelt) -> (BitVec<u8, Msb0>, Felt252Wrapper) {
let key = Felt252Wrapper::from(storage_key.0.0).0.to_bytes_be().view_bits()[5..].to_owned();
let value = Felt252Wrapper::from(storage_value);

Expand Down
20 changes: 6 additions & 14 deletions crates/client/sync/src/commitments/events.rs
Original file line number Diff line number Diff line change
Expand Up @@ -5,11 +5,11 @@ use bonsai_trie::{BonsaiStorage, BonsaiStorageConfig};
use mp_felt::Felt252Wrapper;
use mp_hashers::pedersen::PedersenHasher;
use mp_hashers::HasherT;
use rayon::prelude::*;
use starknet_api::transaction::Event;
use starknet_ff::FieldElement;
use starknet_types_core::felt::Felt;
use starknet_types_core::hash::Pedersen;
use tokio::task::{spawn_blocking, JoinSet};

use crate::utils::constant::bonsai_identifier;

Expand Down Expand Up @@ -54,7 +54,7 @@ pub fn calculate_event_hash<H: HasherT>(event: &Event) -> FieldElement {
/// # Returns
///
/// The event commitment as `Felt252Wrapper`.
pub async fn memory_event_commitment(events: &[Event]) -> Result<Felt252Wrapper, String> {
pub fn memory_event_commitment(events: &[Event]) -> Result<Felt252Wrapper, String> {
// TODO @cchudant refacto/optimise this function
if events.is_empty() {
return Ok(Felt252Wrapper::ZERO);
Expand All @@ -67,14 +67,10 @@ pub async fn memory_event_commitment(events: &[Event]) -> Result<Felt252Wrapper,
let identifier = bonsai_identifier::EVENT;

// event hashes are computed in parallel
let mut task_set = JoinSet::new();
events.iter().cloned().enumerate().for_each(|(i, event)| {
task_set.spawn(async move { (i, calculate_event_hash::<PedersenHasher>(&event)) });
});
let events = events.par_iter().map(calculate_event_hash::<PedersenHasher>).collect::<Vec<_>>();

// once event hashes have finished computing, they are inserted into the local Bonsai db
while let Some(res) = task_set.join_next().await {
let (i, event_hash) = res.map_err(|e| format!("Failed to retrieve event hash: {e}"))?;
for (i, event_hash) in events.into_iter().enumerate() {
let key = BitVec::from_vec(i.to_be_bytes().to_vec());
let value = Felt::from(Felt252Wrapper::from(event_hash));
bonsai_storage.insert(identifier, key.as_bitslice(), &value).expect("Failed to insert into bonsai storage");
Expand All @@ -89,12 +85,8 @@ pub async fn memory_event_commitment(events: &[Event]) -> Result<Felt252Wrapper,
let id = id_builder.new_id();

// run in a blocking-safe thread to avoid starving the thread pool
let root_hash = spawn_blocking(move || {
bonsai_storage.commit(id).expect("Failed to commit to bonsai storage");
bonsai_storage.root_hash(identifier).expect("Failed to get root hash")
})
.await
.map_err(|e| format!("Failed to computed event root hash: {e}"))?;
bonsai_storage.commit(id).expect("Failed to commit to bonsai storage");
let root_hash = bonsai_storage.root_hash(identifier).expect("Failed to get root hash");

Ok(Felt252Wrapper::from(root_hash))
}
100 changes: 65 additions & 35 deletions crates/client/sync/src/commitments/lib.rs
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
use std::sync::{Arc, Mutex, MutexGuard};
use std::sync::{Arc, Mutex};

use bitvec::order::Msb0;
use bitvec::vec::BitVec;
Expand All @@ -17,6 +17,7 @@ use mp_hashers::poseidon::PoseidonHasher;
use mp_hashers::HasherT;
use mp_storage::StarknetStorageSchemaVersion::Undefined;
use mp_transactions::Transaction;
use rayon::prelude::*;
use sp_core::H256;
use sp_runtime::generic::{Block, Header};
use sp_runtime::traits::BlakeTwo256;
Expand All @@ -26,7 +27,6 @@ use starknet_api::hash::StarkFelt;
use starknet_api::state::StorageKey;
use starknet_api::transaction::Event;
use starknet_types_core::hash::{Pedersen, Poseidon};
use tokio::join;

use super::classes::calculate_class_commitment_leaf_hash;
use super::contracts::{identifier, update_storage_trie, ContractLeafParams};
Expand All @@ -47,15 +47,16 @@ use crate::utils::constant::bonsai_identifier;
/// # Returns
///
/// The transaction and the event commitment as `Felt252Wrapper`.
pub async fn calculate_commitments(
pub fn calculate_commitments(
transactions: &[Transaction],
events: &[Event],
chain_id: Felt252Wrapper,
block_number: u64,
) -> (Felt252Wrapper, Felt252Wrapper) {
let (commitment_tx, commitment_event) =
join!(memory_transaction_commitment(transactions, chain_id, block_number), memory_event_commitment(events));

let (commitment_tx, commitment_event) = rayon::join(
|| memory_transaction_commitment(transactions, chain_id, block_number),
|| memory_event_commitment(events),
);
(
commitment_tx.expect("Failed to calculate transaction commitment"),
commitment_event.expect("Failed to calculate event commitment"),
Expand Down Expand Up @@ -173,19 +174,26 @@ pub fn update_state_root(
substrate_block_hash: Option<H256>,
) -> Felt252Wrapper {
// Update contract and its storage tries
let contract_trie_root = contract_trie_root(
&csd,
overrides,
bonsai_contract.lock().unwrap(),
bonsai_contract_storage,
block_number,
substrate_block_hash,
)
.expect("Failed to compute contract root");
let (contract_trie_root, class_trie_root) = rayon::join(
|| {
let mut bonsai_contract = bonsai_contract.lock().unwrap();
let mut bonsai_contract_storage = bonsai_contract_storage.lock().unwrap();

// Update class trie
let class_trie_root =
class_trie_root(&csd, bonsai_class.lock().unwrap(), block_number).expect("Failed to compute class root");
contract_trie_root(
&csd,
overrides,
&mut bonsai_contract,
&mut bonsai_contract_storage,
block_number,
substrate_block_hash,
)
.expect("Failed to compute contract root")
},
|| {
let mut bonsai_class = bonsai_class.lock().unwrap();
class_trie_root(&csd, &mut bonsai_class, block_number).expect("Failed to compute class root")
},
);

calculate_state_root::<PoseidonHasher>(contract_trie_root, class_trie_root)
}
Expand All @@ -205,34 +213,52 @@ pub fn update_state_root(
fn contract_trie_root(
csd: &CommitmentStateDiff,
overrides: Arc<OverrideHandle<Block<Header<u32, BlakeTwo256>, OpaqueExtrinsic>>>,
mut bonsai_contract: MutexGuard<BonsaiStorage<BasicId, BonsaiDb, Pedersen>>,
bonsai_contract_storage: Arc<Mutex<BonsaiStorage<BasicId, BonsaiDb, Pedersen>>>,
bonsai_contract: &mut BonsaiStorage<BasicId, BonsaiDb, Pedersen>,
bonsai_contract_storage: &mut BonsaiStorage<BasicId, BonsaiDb, Pedersen>,
block_number: u64,
maybe_block_hash: Option<H256>,
) -> Result<Felt252Wrapper, BonsaiStorageError<BonsaiDbError>> {
let identifier = bonsai_identifier::CONTRACT;
bonsai_contract.init_tree(identifier)?;

let start1 = std::time::Instant::now();

let start = std::time::Instant::now();
// First we insert the contract storage changes
// TODO: @cchudant parallelize this loop
for (contract_address, updates) in csd.storage_updates.iter() {
update_storage_trie(contract_address, updates, &bonsai_contract_storage);
update_storage_trie(contract_address, updates, bonsai_contract_storage);
}
log::debug!("contract_trie_root update_storage_trie: {:?}", std::time::Instant::now() - start);

// Then we commit them
bonsai_contract_storage.lock().unwrap().commit(BasicId::new(block_number))?;
let start = std::time::Instant::now();
bonsai_contract_storage.commit(BasicId::new(block_number))?;
log::debug!("contract_trie_root bonsai_contract_storage.commit: {:?}", std::time::Instant::now() - start);

let start = std::time::Instant::now();
// Then we compute the leaf hashes retrieving the corresponding storage root
// TODO: @cchudant parallelize this loop
for contract_address in csd.storage_updates.iter() {
let class_commitment_leaf_hash =
contract_state_leaf_hash(csd, &overrides, contract_address.0, maybe_block_hash, &bonsai_contract_storage)?;
let bonsai_contract_storage = &*bonsai_contract_storage; // downgrade `&mut` to `&`
let updates = csd
.storage_updates
.iter()
.par_bridge()
.map(|contract_address| {
contract_state_leaf_hash(csd, &overrides, contract_address.0, maybe_block_hash, bonsai_contract_storage)
})
.collect::<Result<Vec<_>, BonsaiStorageError<BonsaiDbError>>>()?;
log::debug!("contract_trie_root updates: {:?}", std::time::Instant::now() - start);

let start = std::time::Instant::now();
for (contract_address, class_commitment_leaf_hash) in csd.storage_updates.iter().zip(updates) {
let key = key(contract_address.0.0.0);
bonsai_contract.insert(identifier, &key, &class_commitment_leaf_hash.into())?;
}
log::debug!("contract_trie_root bonsai_contract.commit: {:?}", std::time::Instant::now() - start);

let start = std::time::Instant::now();
bonsai_contract.commit(BasicId::new(block_number))?;
log::debug!("contract_trie_root bonsai_contract.commit: {:?}", std::time::Instant::now() - start);
log::debug!("contract_trie_root: {:?}", std::time::Instant::now() - start1);
Ok(bonsai_contract.root_hash(identifier)?.into())
}

Expand All @@ -241,11 +267,10 @@ fn contract_state_leaf_hash(
overrides: &OverrideHandle<Block<Header<u32, BlakeTwo256>, OpaqueExtrinsic>>,
contract_address: &ContractAddress,
maybe_block_hash: Option<H256>,
bonsai_contract_storage: &Arc<Mutex<BonsaiStorage<BasicId, BonsaiDb, Pedersen>>>,
bonsai_contract_storage: &BonsaiStorage<BasicId, BonsaiDb, Pedersen>,
) -> Result<Felt252Wrapper, BonsaiStorageError<BonsaiDbError>> {
let identifier = identifier(contract_address);
let storage_root =
bonsai_contract_storage.lock().unwrap().root_hash(identifier).expect("Failed to get root hash").into();
let storage_root = bonsai_contract_storage.root_hash(identifier)?.into();

let nonce =
Felt252Wrapper::from(*csd.address_to_nonce.get(contract_address).unwrap_or(&Felt252Wrapper::ZERO.into()));
Expand Down Expand Up @@ -288,16 +313,21 @@ fn class_hash(
/// The class root.
fn class_trie_root(
csd: &CommitmentStateDiff,
mut bonsai_class: MutexGuard<BonsaiStorage<BasicId, BonsaiDb, Poseidon>>,
bonsai_class: &mut BonsaiStorage<BasicId, BonsaiDb, Poseidon>,
block_number: u64,
) -> Result<Felt252Wrapper, BonsaiStorageError<BonsaiDbError>> {
let identifier = bonsai_identifier::CLASS;
bonsai_class.init_tree(identifier)?;

// TODO: @cchudant parallelize this loop
for (class_hash, compiled_class_hash) in csd.class_hash_to_compiled_class_hash.iter() {
let class_commitment_leaf_hash =
calculate_class_commitment_leaf_hash::<PoseidonHasher>(Felt252Wrapper::from(compiled_class_hash.0));
let updates = csd
.class_hash_to_compiled_class_hash
.values()
.par_bridge()
.map(|compiled_class_hash| {
calculate_class_commitment_leaf_hash::<PoseidonHasher>(Felt252Wrapper::from(compiled_class_hash.0))
})
.collect::<Vec<_>>();
for (class_hash, class_commitment_leaf_hash) in csd.class_hash_to_compiled_class_hash.keys().zip(updates) {
let key = key(class_hash.0);
bonsai_class.insert(identifier, key.as_bitslice(), &class_commitment_leaf_hash.into())?;
}
Expand Down
26 changes: 9 additions & 17 deletions crates/client/sync/src/commitments/transactions.rs
Original file line number Diff line number Diff line change
Expand Up @@ -7,10 +7,10 @@ use mp_hashers::pedersen::PedersenHasher;
use mp_hashers::HasherT;
use mp_transactions::compute_hash::ComputeTransactionHash;
use mp_transactions::Transaction;
use rayon::prelude::*;
use starknet_ff::FieldElement;
use starknet_types_core::felt::Felt;
use starknet_types_core::hash::Pedersen;
use tokio::task::{spawn_blocking, JoinSet};

use crate::utils::constant::bonsai_identifier;

Expand Down Expand Up @@ -66,7 +66,7 @@ where
/// # Returns
///
/// The transaction commitment as `Felt252Wrapper`.
pub async fn memory_transaction_commitment(
pub fn memory_transaction_commitment(
transactions: &[Transaction],
chain_id: Felt252Wrapper,
block_number: u64,
Expand All @@ -79,16 +79,13 @@ pub async fn memory_transaction_commitment(
let identifier = bonsai_identifier::TRANSACTION;

// transaction hashes are computed in parallel
let mut task_set = JoinSet::new();
transactions.iter().cloned().enumerate().for_each(|(i, tx)| {
task_set.spawn(async move {
(i, calculate_transaction_hash_with_signature::<PedersenHasher>(&tx, chain_id, block_number))
});
});
let txs = transactions
.par_iter()
.map(|tx| calculate_transaction_hash_with_signature::<PedersenHasher>(tx, chain_id, block_number))
.collect::<Vec<_>>();

// once transaction hashes have finished computing, they are inserted into the local Bonsai db
while let Some(res) = task_set.join_next().await {
let (i, tx_hash) = res.map_err(|e| format!("Failed to retrieve transaction hash: {e}"))?;
for (i, tx_hash) in txs.into_iter().enumerate() {
let key = BitVec::from_vec(i.to_be_bytes().to_vec());
let value = Felt::from(Felt252Wrapper::from(tx_hash));
bonsai_storage.insert(identifier, key.as_bitslice(), &value).expect("Failed to insert into bonsai storage");
Expand All @@ -97,13 +94,8 @@ pub async fn memory_transaction_commitment(
let mut id_builder = BasicIdBuilder::new();
let id = id_builder.new_id();

// run in a blocking-safe thread to avoid starving the thread pool
let root_hash = spawn_blocking(move || {
bonsai_storage.commit(id).expect("Failed to commit to bonsai storage");
bonsai_storage.root_hash(identifier).expect("Failed to get root hash")
})
.await
.map_err(|e| format!("Failed to computed transaction root hash: {e}"))?;
bonsai_storage.commit(id).expect("Failed to commit to bonsai storage");
let root_hash = bonsai_storage.root_hash(identifier).expect("Failed to get root hash");

Ok(Felt252Wrapper::from(root_hash))
}
Loading

0 comments on commit ff8dc52

Please sign in to comment.