From f82d2bd6a84fdfbda58d401dead22042f8dea904 Mon Sep 17 00:00:00 2001
From: antiyro <74653697+antiyro@users.noreply.github.com>
Date: Thu, 6 Jun 2024 12:53:20 +0200
Subject: [PATCH] replaced felts and hashers with types-core types (#137)

Co-authored-by: jbcaron
---
 CHANGELOG.md                                  |   1 +
 .../db/src/storage_handler/class_trie.rs      |   4 +-
 crates/client/sync/src/commitments/classes.rs |  23 +--
 .../client/sync/src/commitments/contracts.rs  |  23 +--
 crates/client/sync/src/commitments/events.rs  |  46 ++---
 crates/client/sync/src/commitments/mod.rs     | 170 +++++++++++++++++-
 .../sync/src/commitments/transactions.rs      |  45 +++--
 crates/client/sync/src/l2.rs                  |  18 +-
 crates/client/sync/src/reorgs/lib.rs          |  34 ----
 crates/client/sync/src/reorgs/mod.rs          |  35 +++-
 crates/client/sync/src/utils/convert.rs       |  14 +-
 crates/client/sync/src/utils/utility.rs       |  66 +------
 12 files changed, 274 insertions(+), 205 deletions(-)
 delete mode 100644 crates/client/sync/src/reorgs/lib.rs

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 33ab8cc5e..3dc9fc699 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,7 @@
 
 ## Next release
 
+- fix(hashers): cleaned hashers using types-core hashers and Felt
 - refactor: remove substrate block storage
 - feat(infra): Added boilerplate to deploy a grafana/prometheus dashboard
 - refacor: use db hash
diff --git a/crates/client/db/src/storage_handler/class_trie.rs b/crates/client/db/src/storage_handler/class_trie.rs
index 56e49b175..336cbfb42 100644
--- a/crates/client/db/src/storage_handler/class_trie.rs
+++ b/crates/client/db/src/storage_handler/class_trie.rs
@@ -3,7 +3,6 @@ use std::sync::{RwLockReadGuard, RwLockWriteGuard};
 use bonsai_trie::id::BasicId;
 use bonsai_trie::BonsaiStorage;
 use starknet_api::core::ClassHash;
-use starknet_ff::FieldElement;
 use starknet_types_core::felt::Felt;
 use starknet_types_core::hash::Poseidon;
 
@@ -72,10 +71,9 @@ impl ClassTrieViewMut<'_> {
         self.0.root_hash(bonsai_identifier::CLASS).map_err(|_| DeoxysStorageError::TrieRootError(TrieType::Class))
     }
 
-    pub fn update(&mut self, updates: Vec<(&ClassHash, FieldElement)>) -> Result<(), DeoxysStorageError> {
+    pub fn update(&mut self, updates: Vec<(&ClassHash, Felt)>) -> Result<(), DeoxysStorageError> {
         for (key, value) in updates {
             let key = conv_class_key(key);
-            let value = Felt::from_bytes_be(&value.to_bytes_be());
             self.0
                 .insert(bonsai_identifier::CLASS, &key, &value)
                 .map_err(|_| DeoxysStorageError::StorageInsertionError(StorageType::Class))?;
diff --git a/crates/client/sync/src/commitments/classes.rs b/crates/client/sync/src/commitments/classes.rs
index 79254aef1..45c3305d8 100644
--- a/crates/client/sync/src/commitments/classes.rs
+++ b/crates/client/sync/src/commitments/classes.rs
@@ -1,14 +1,12 @@
 use blockifier::state::cached_state::CommitmentStateDiff;
 use mc_db::storage_handler::{self, DeoxysStorageError};
-use mp_felt::Felt252Wrapper;
-use mp_hashers::poseidon::PoseidonHasher;
-use mp_hashers::HasherT;
 use rayon::prelude::*;
-use starknet_ff::FieldElement;
+use starknet_types_core::felt::Felt;
+use starknet_types_core::hash::{Poseidon, StarkHash};
 
 // "CONTRACT_CLASS_LEAF_V0"
-const CONTRACT_CLASS_HASH_VERSION: FieldElement =
-    FieldElement::from_mont([9331882290187415277, 12057587991035439952, 18444375821049509847, 115292049744600508]);
+const CONTRACT_CLASS_HASH_VERSION: Felt =
+    Felt::from_raw([115292049744600508, 18444375821049509847, 12057587991035439952, 9331882290187415277]);
 
 /// Calculates the class trie root
 ///
 /// # Arguments
 ///
 /// * `csd`          - Commitment state diff which includes the class updates.
 /// * `block_number` - The current block number.
 ///
 /// # Returns
 ///
 /// The class root.
-pub fn class_trie_root(csd: &CommitmentStateDiff, block_number: u64) -> Result<Felt252Wrapper, DeoxysStorageError> {
+pub fn class_trie_root(csd: &CommitmentStateDiff, block_number: u64) -> Result<Felt, DeoxysStorageError> {
     let mut handler_class = storage_handler::class_trie_mut();
 
     let updates = csd
@@ -29,9 +27,9 @@ pub fn class_trie_root(csd: &CommitmentStateDiff, block_number: u64) -> Result
         .iter()
         .par_bridge()
         .map(|(class_hash, compiled_class_hash)| {
-            let compiled_class_hash = Felt252Wrapper::from(compiled_class_hash.0).into();
+            let compiled_class_hash = Felt::from_bytes_be(&compiled_class_hash.0.0);
 
-            let hash = PoseidonHasher::hash_elements(CONTRACT_CLASS_HASH_VERSION, compiled_class_hash);
+            let hash = Poseidon::hash(&CONTRACT_CLASS_HASH_VERSION, &compiled_class_hash);
 
             (class_hash, hash)
         })
@@ -46,5 +44,5 @@ pub fn class_trie_root(csd: &CommitmentStateDiff, block_number: u64) -> Result
     handler_class.update(updates)?;
     handler_class.commit(block_number)?;
 
-    Ok(handler_class.root()?.into())
+    handler_class.root()
 }
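The `from_mont` to `from_raw` swap above is easy to get wrong because the two constructors take the Montgomery limbs in opposite order. A minimal, self-contained sketch of how the constant and the new class-trie leaf hash can be sanity-checked with starknet-types-core; the compiled class hash is a made-up placeholder value:

```rust
use starknet_types_core::felt::Felt;
use starknet_types_core::hash::{Poseidon, StarkHash};

const CONTRACT_CLASS_HASH_VERSION: Felt =
    Felt::from_raw([115292049744600508, 18444375821049509847, 12057587991035439952, 9331882290187415277]);

fn main() {
    // The constant should decode to the ASCII string "CONTRACT_CLASS_LEAF_V0",
    // assuming the limb order above matches from_raw's expectations.
    assert_eq!(CONTRACT_CLASS_HASH_VERSION, Felt::from_bytes_be_slice(b"CONTRACT_CLASS_LEAF_V0"));

    // A class-trie leaf is Poseidon(version, compiled_class_hash).
    let compiled_class_hash = Felt::from(1234u64); // placeholder, not real chain data
    let leaf = Poseidon::hash(&CONTRACT_CLASS_HASH_VERSION, &compiled_class_hash);
    println!("class trie leaf: {leaf}");
}
```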
diff --git a/crates/client/sync/src/commitments/contracts.rs b/crates/client/sync/src/commitments/contracts.rs
--- a/crates/client/sync/src/commitments/contracts.rs
+++ b/crates/client/sync/src/commitments/contracts.rs
@@ -1,10 +1,6 @@
 use blockifier::state::cached_state::CommitmentStateDiff;
 use mc_db::storage_handler::{self, DeoxysStorageError};
-use mp_felt::Felt252Wrapper;
-use mp_hashers::pedersen::PedersenHasher;
-use mp_hashers::HasherT;
 use rayon::prelude::*;
 use starknet_api::core::ContractAddress;
-use starknet_ff::FieldElement;
 use starknet_types_core::felt::Felt;
-use starknet_types_core::hash::Pedersen;
+use starknet_types_core::hash::{Pedersen, StarkHash};
@@ -38,7 +34,7 @@ use starknet_types_core::hash::{Pedersen, StarkHash};
 /// The contract root.
-pub fn contract_trie_root(csd: &CommitmentStateDiff, block_number: u64) -> Result<Felt252Wrapper, DeoxysStorageError> {
+pub fn contract_trie_root(csd: &CommitmentStateDiff, block_number: u64) -> Result<Felt, DeoxysStorageError> {
     // NOTE: handlers implicitely acquire a lock on their respective tries
     // for the duration of their livetimes
     let mut handler_contract = storage_handler::contract_trie_mut();
@@ -73,7 +70,7 @@ pub fn contract_trie_root(csd: &CommitmentStateDiff, block_number: u64) -> Resul
     handler_contract.update(updates)?;
     handler_contract.commit(block_number)?;
 
-    Ok(handler_contract.root()?.into())
+    handler_contract.root()
 }
 
 /// Computes the contract state leaf hash
@@ -94,14 +91,12 @@ fn contract_state_leaf_hash(
 ) -> Result<Felt, DeoxysStorageError> {
     let (class_hash, nonce) = class_hash_and_nonce(csd, contract_address)?;
 
-    let storage_root = FieldElement::from_bytes_be(&storage_root.to_bytes_be()).unwrap();
-
     // computes the contract state leaf hash
-    let contract_state_hash = PedersenHasher::hash_elements(class_hash, storage_root);
-    let contract_state_hash = PedersenHasher::hash_elements(contract_state_hash, nonce);
-    let contract_state_hash = PedersenHasher::hash_elements(contract_state_hash, FieldElement::ZERO);
+    let contract_state_hash = Pedersen::hash(&class_hash, &storage_root);
+    let contract_state_hash = Pedersen::hash(&contract_state_hash, &nonce);
+    let contract_state_hash = Pedersen::hash(&contract_state_hash, &Felt::ZERO);
 
-    Ok(Felt::from_bytes_be(&contract_state_hash.to_bytes_be()))
+    Ok(contract_state_hash)
 }
 
 /// Retrieves the class hash and nonce of a contract address
@@ -117,7 +112,7 @@ fn contract_state_leaf_hash(
 fn class_hash_and_nonce(
     csd: &CommitmentStateDiff,
     contract_address: &ContractAddress,
-) -> Result<(FieldElement, FieldElement), DeoxysStorageError> {
+) -> Result<(Felt, Felt), DeoxysStorageError> {
     let class_hash = match csd.address_to_class_hash.get(contract_address) {
         Some(class_hash) => *class_hash,
         None => storage_handler::contract_class_hash().get(contract_address)?.unwrap_or_default(),
     };
     let nonce = match csd.address_to_nonce.get(contract_address) {
         Some(nonce) => *nonce,
         None => storage_handler::contract_nonces().get(contract_address)?.unwrap_or_default(),
     };
-    Ok((FieldElement::from_bytes_be(&class_hash.0.0).unwrap(), FieldElement::from_bytes_be(&nonce.0.0).unwrap()))
+    Ok((Felt::from_bytes_be(&class_hash.0.0), Felt::from_bytes_be(&nonce.0.0)))
 }
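For reference, the contract-state leaf that `contract_state_leaf_hash` now builds directly with `Pedersen::hash` is the standard chain H(H(H(class_hash, storage_root), nonce), 0). A runnable sketch with placeholder felts; real values come out of the tries above:

```rust
use starknet_types_core::felt::Felt;
use starknet_types_core::hash::{Pedersen, StarkHash};

fn contract_state_leaf(class_hash: Felt, storage_root: Felt, nonce: Felt) -> Felt {
    let h = Pedersen::hash(&class_hash, &storage_root);
    let h = Pedersen::hash(&h, &nonce);
    // the trailing zero is the contract state hash version
    Pedersen::hash(&h, &Felt::ZERO)
}

fn main() {
    // placeholder inputs, not real chain data
    let leaf = contract_state_leaf(Felt::from(17u64), Felt::from(29u64), Felt::ZERO);
    println!("contract state leaf: {leaf}");
}
```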
diff --git a/crates/client/sync/src/commitments/events.rs b/crates/client/sync/src/commitments/events.rs
index b7e13fbeb..e49a7a9b8 100644
--- a/crates/client/sync/src/commitments/events.rs
+++ b/crates/client/sync/src/commitments/events.rs
@@ -3,14 +3,10 @@ use bonsai_trie::databases::HashMapDb;
 use bonsai_trie::id::{BasicId, BasicIdBuilder};
 use bonsai_trie::{BonsaiStorage, BonsaiStorageConfig};
 use mc_db::storage_handler::bonsai_identifier;
-use mp_felt::Felt252Wrapper;
-use mp_hashers::pedersen::PedersenHasher;
-use mp_hashers::HasherT;
 use rayon::prelude::*;
 use starknet_api::transaction::Event;
-use starknet_ff::FieldElement;
 use starknet_types_core::felt::Felt;
-use starknet_types_core::hash::Pedersen;
+use starknet_types_core::hash::{Pedersen, StarkHash};
 
 /// Calculate the hash of the event.
 ///
 /// # Arguments
 ///
 /// * `event` - The event we want to calculate the hash of.
 ///
 /// # Returns
 ///
-/// The event hash as `FieldElement`.
-pub fn calculate_event_hash<H: HasherT>(event: &Event) -> FieldElement {
+/// The event hash as `Felt`.
+pub fn calculate_event_hash(event: &Event) -> Felt {
     let (keys_hash, data_hash) = rayon::join(
         || {
-            H::compute_hash_on_elements(
-                &event
-                    .content
-                    .keys
-                    .iter()
-                    .map(|key| FieldElement::from(Felt252Wrapper::from(key.0)))
-                    .collect::<Vec<FieldElement>>(),
+            Pedersen::hash_array(
+                &event.content.keys.iter().map(|key| Felt::from_bytes_be(&key.0.0)).collect::<Vec<_>>(),
             )
         },
         || {
-            H::compute_hash_on_elements(
-                &event
-                    .content
-                    .data
-                    .0
-                    .iter()
-                    .map(|data| FieldElement::from(Felt252Wrapper::from(*data)))
-                    .collect::<Vec<FieldElement>>(),
+            Pedersen::hash_array(
+                &event.content.data.0.iter().map(|data| Felt::from_bytes_be(&data.0)).collect::<Vec<_>>(),
             )
         },
     );
 
-    let from_address = FieldElement::from(Felt252Wrapper::from(event.from_address.0.0));
-    H::compute_hash_on_elements(&[from_address, keys_hash, data_hash])
+    let from_address = Felt::from_bytes_be(&event.from_address.0.0.0);
+    Pedersen::hash_array(&[from_address, keys_hash, data_hash])
 }
 
 /// Calculate the event commitment in memory using HashMapDb (which is more efficient for this
 /// use case).
 ///
 /// # Arguments
 ///
 /// * `events` - The events of the block.
 ///
 /// # Returns
 ///
-/// The event commitment as `Felt252Wrapper`.
-pub fn memory_event_commitment(events: &[Event]) -> Result<Felt252Wrapper, String> {
-    // TODO @cchudant refacto/optimise this function
+/// The event commitment as `Felt`.
+pub fn memory_event_commitment(events: &[Event]) -> Result<Felt, String> {
     if events.is_empty() {
-        return Ok(Felt252Wrapper::ZERO);
+        return Ok(Felt::ZERO);
     }
 
     let config = BonsaiStorageConfig::default();
@@ -72,12 +56,12 @@ pub fn memory_event_commitment(events: &[Event]) -> Result<Felt252Wrapper, Str
     // event hashes are computed in parallel
-    let events = events.par_iter().map(calculate_event_hash::<PedersenHasher>).collect::<Vec<FieldElement>>();
+    let events = events.par_iter().map(calculate_event_hash).collect::<Vec<_>>();
 
     // once event hashes have finished computing, they are inserted into the local Bonsai db
     for (i, event_hash) in events.into_iter().enumerate() {
         let key = BitVec::from_vec(i.to_be_bytes().to_vec());
-        let value = Felt::from(Felt252Wrapper::from(event_hash));
+        let value = event_hash;
         bonsai_storage.insert(identifier, key.as_bitslice(), &value).expect("Failed to insert into bonsai storage");
     }
 
@@ -93,5 +77,5 @@ pub fn memory_event_commitment(events: &[Event]) -> Result<Felt252Wrapper, Str
     bonsai_storage.commit(id).expect("Failed to commit to bonsai storage");
 
     let root_hash = bonsai_storage.root_hash(identifier).expect("Failed to get root hash");
-    Ok(Felt252Wrapper::from(root_hash))
+    Ok(root_hash)
 }
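The event hash rewritten above follows H([from_address, H(keys), H(data)]) with Pedersen throughout. A self-contained sketch using made-up keys and data:

```rust
use starknet_types_core::felt::Felt;
use starknet_types_core::hash::{Pedersen, StarkHash};

fn event_hash(from_address: Felt, keys: &[Felt], data: &[Felt]) -> Felt {
    // hash the key and data arrays separately, then combine with the emitter address
    let keys_hash = Pedersen::hash_array(keys);
    let data_hash = Pedersen::hash_array(data);
    Pedersen::hash_array(&[from_address, keys_hash, data_hash])
}

fn main() {
    // made-up event content
    let h = event_hash(
        Felt::from(0xdeadu64),
        &[Felt::from(1u64), Felt::from(2u64)],
        &[Felt::from(42u64)],
    );
    println!("event hash: {h}");
}
```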
diff --git a/crates/client/sync/src/commitments/mod.rs b/crates/client/sync/src/commitments/mod.rs
--- a/crates/client/sync/src/commitments/mod.rs
+++ b/crates/client/sync/src/commitments/mod.rs
@@ -1,5 +1,170 @@
 pub mod classes;
 pub mod contracts;
 pub mod events;
-pub mod lib;
 pub mod transactions;
 
+use blockifier::state::cached_state::CommitmentStateDiff;
+use indexmap::IndexMap;
+use mp_felt::Felt252Wrapper;
+use starknet_api::core::{ClassHash, CompiledClassHash, ContractAddress, Nonce};
+use starknet_api::hash::StarkFelt;
+use starknet_api::state::StorageKey;
+use starknet_api::transaction::{Event, Transaction};
+use starknet_core::types::{
+    ContractStorageDiffItem, DeclaredClassItem, DeployedContractItem, NonceUpdate, ReplacedClassItem, StateUpdate,
+    StorageEntry,
+};
+use starknet_ff::FieldElement;
+use starknet_types_core::felt::Felt;
+use starknet_types_core::hash::{Poseidon, StarkHash};
+
+use crate::commitments::classes::class_trie_root;
+use crate::commitments::contracts::contract_trie_root;
+use crate::commitments::events::memory_event_commitment;
+use crate::commitments::transactions::memory_transaction_commitment;
+
+// "STARKNET_STATE_V0"
+const STARKNET_STATE_PREFIX: Felt = Felt::from_raw([/* Montgomery limbs of "STARKNET_STATE_V0" */]);
+
+pub fn calculate_tx_and_event_commitments(
+    transactions: &[Transaction],
+    events: &[Event],
+    chain_id: Felt252Wrapper,
+    block_number: u64,
+) -> ((Felt, Vec<Felt>), Felt) {
+    let (commitment_tx, commitment_event) = rayon::join(
+        || memory_transaction_commitment(transactions, chain_id, block_number),
+        || memory_event_commitment(events),
+    );
+
+    (
+        commitment_tx.expect("Failed to calculate transaction commitment"),
+        commitment_event.expect("Failed to calculate event commitment"),
+    )
+}
+
+/// Aggregates all the changes from the last state update in a way that is easy to access
+/// when computing the state root
+///
+/// * `state_update`: The last state update fetched from the sequencer
+pub fn build_commitment_state_diff(state_update: &StateUpdate) -> CommitmentStateDiff {
+    let mut commitment_state_diff = CommitmentStateDiff {
+        address_to_class_hash: IndexMap::new(),
+        address_to_nonce: IndexMap::new(),
+        storage_updates: IndexMap::new(),
+        class_hash_to_compiled_class_hash: IndexMap::new(),
+    };
+
+    for DeployedContractItem { address, class_hash } in state_update.state_diff.deployed_contracts.iter() {
+        let address = ContractAddress::from_field_element(address);
+        let class_hash = if address == ContractAddress::from_field_element(FieldElement::ZERO) {
+            // System contracts don't have class hashes
+            ClassHash::from_field_element(FieldElement::ZERO)
+        } else {
+            ClassHash::from_field_element(class_hash)
+        };
+        commitment_state_diff.address_to_class_hash.insert(address, class_hash);
+    }
+
+    for ReplacedClassItem { contract_address, class_hash } in state_update.state_diff.replaced_classes.iter() {
+        let address = ContractAddress::from_field_element(contract_address);
+        let class_hash = ClassHash::from_field_element(class_hash);
+        commitment_state_diff.address_to_class_hash.insert(address, class_hash);
+    }
+
+    for DeclaredClassItem { class_hash, compiled_class_hash } in state_update.state_diff.declared_classes.iter() {
+        let class_hash = ClassHash::from_field_element(class_hash);
+        let compiled_class_hash = CompiledClassHash::from_field_element(compiled_class_hash);
+        commitment_state_diff.class_hash_to_compiled_class_hash.insert(class_hash, compiled_class_hash);
+    }
+
+    for NonceUpdate { contract_address, nonce } in state_update.state_diff.nonces.iter() {
+        let contract_address = ContractAddress::from_field_element(contract_address);
+        let nonce_value = Nonce::from_field_element(nonce);
+        commitment_state_diff.address_to_nonce.insert(contract_address, nonce_value);
+    }
+
+    for ContractStorageDiffItem { address, storage_entries } in state_update.state_diff.storage_diffs.iter() {
+        let contract_address = ContractAddress::from_field_element(address);
+        let mut storage_map = IndexMap::new();
+        for StorageEntry { key, value } in storage_entries.iter() {
+            let key = StorageKey::from_field_element(key);
+            let value = StarkFelt::from_field_element(value);
+            storage_map.insert(key, value);
+        }
+        commitment_state_diff.storage_updates.insert(contract_address, storage_map);
+    }
+
+    commitment_state_diff
+}
+
+/// Calculate the state commitment hash value.
+///
+/// The state commitment is the digest that uniquely (up to hash collisions) encodes the state.
+/// It combines the roots of the two binary Merkle-Patricia tries of height 251 using the
+/// Poseidon hasher.
+///
+/// # Arguments
+///
+/// * `contracts_trie_root` - The root of the contracts trie.
+/// * `classes_trie_root` - The root of the classes trie.
+///
+/// # Returns
+///
+/// The state commitment as a `Felt`.
+pub fn calculate_state_root(contracts_trie_root: Felt, classes_trie_root: Felt) -> Felt {
+    if classes_trie_root == Felt::ZERO {
+        contracts_trie_root
+    } else {
+        Poseidon::hash_array(&[STARKNET_STATE_PREFIX, contracts_trie_root, classes_trie_root])
+    }
+}
+
+/// Update the state commitment hash value.
+///
+/// The state commitment is the digest that uniquely (up to hash collisions) encodes the state.
+/// It combines the roots of two binary Merkle-Patricia tries of height 251 using Poseidon/Pedersen
+/// hashers.
+///
+/// # Arguments
+///
+/// * `csd` - The commitment state diff inducing unprocessed state changes.
+/// * `block_number` - The block number the state diff belongs to.
+///
+/// # Returns
+///
+/// The updated state root as a `Felt`.
+pub fn csd_calculate_state_root(csd: CommitmentStateDiff, block_number: u64) -> Felt {
+    // Update contract and its storage tries
+    let (contract_trie_root, class_trie_root) = rayon::join(
+        || contract_trie_root(&csd, block_number).expect("Failed to compute contract root"),
+        || class_trie_root(&csd, block_number).expect("Failed to compute class root"),
+    );
+
+    calculate_state_root(contract_trie_root, class_trie_root)
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_starknet_state_version() {
+        assert_eq!(STARKNET_STATE_PREFIX, Felt::from_bytes_be_slice("STARKNET_STATE_V0".as_bytes()));
+    }
+}
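The state commitment logic added above reduces to one Poseidon array hash over a version prefix and the two trie roots. A sketch with placeholder roots, where `Felt::from_bytes_be_slice` stands in for the `STARKNET_STATE_PREFIX` constant:

```rust
use starknet_types_core::felt::Felt;
use starknet_types_core::hash::{Poseidon, StarkHash};

fn state_commitment(contracts_trie_root: Felt, classes_trie_root: Felt) -> Felt {
    // while the classes trie is empty, the commitment is just the contracts trie root
    if classes_trie_root == Felt::ZERO {
        return contracts_trie_root;
    }
    let prefix = Felt::from_bytes_be_slice(b"STARKNET_STATE_V0");
    Poseidon::hash_array(&[prefix, contracts_trie_root, classes_trie_root])
}

fn main() {
    // placeholder roots, not real chain data
    let root = state_commitment(Felt::from(7u64), Felt::from(11u64));
    println!("state commitment: {root}");
}
```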
diff --git a/crates/client/sync/src/commitments/transactions.rs b/crates/client/sync/src/commitments/transactions.rs
index 5ea83c8f2..c82d68859 100644
--- a/crates/client/sync/src/commitments/transactions.rs
+++ b/crates/client/sync/src/commitments/transactions.rs
@@ -5,13 +5,11 @@ use bonsai_trie::{BonsaiStorage, BonsaiStorageConfig};
 use mc_db::storage_handler::bonsai_identifier;
 use mp_felt::Felt252Wrapper;
 use mp_hashers::pedersen::PedersenHasher;
-use mp_hashers::HasherT;
 use mp_transactions::compute_hash::ComputeTransactionHash;
 use rayon::prelude::*;
 use starknet_api::transaction::Transaction;
-use starknet_ff::FieldElement;
 use starknet_types_core::felt::Felt;
-use starknet_types_core::hash::Pedersen;
+use starknet_types_core::hash::{Pedersen, StarkHash};
 
 /// Compute the combined hash of the transaction hash and the signature.
 ///
@@ -26,11 +24,11 @@ use starknet_types_core::hash::{Pedersen, StarkHash};
 /// # Returns
 ///
 /// The transaction hash with signature.
-pub fn calculate_transaction_hash_with_signature<H: HasherT>(
+pub fn calculate_transaction_hash_with_signature(
     transaction: &Transaction,
     chain_id: Felt252Wrapper,
     block_number: u64,
-) -> FieldElement {
+) -> Felt {
     let include_signature = block_number >= 61394;
 
     let (signature_hash, tx_hash) = rayon::join(
@@ -39,20 +37,16 @@ pub fn calculate_transaction_hash_with_signature(
         || match transaction {
             Transaction::Invoke(invoke_tx) => {
                 // Include signatures for Invoke transactions or for all transactions
                 let signature = invoke_tx.signature();
 
-                H::compute_hash_on_elements(
-                    &signature.0.iter().map(|x| Felt252Wrapper::from(*x).into()).collect::<Vec<FieldElement>>(),
-                )
+                Pedersen::hash_array(&signature.0.iter().map(|x| Felt::from_bytes_be(&x.0)).collect::<Vec<_>>())
             }
             Transaction::Declare(declare_tx) => {
                 // Include signatures for Declare transactions if the block number is greater than 61394 (mainnet)
                 if include_signature {
                     let signature = declare_tx.signature();
 
-                    H::compute_hash_on_elements(
-                        &signature.0.iter().map(|x| Felt252Wrapper::from(*x).into()).collect::<Vec<FieldElement>>(),
-                    )
+                    Pedersen::hash_array(&signature.0.iter().map(|x| Felt::from_bytes_be(&x.0)).collect::<Vec<_>>())
                 } else {
-                    H::compute_hash_on_elements(&[])
+                    Pedersen::hash_array(&[])
                 }
             }
             Transaction::DeployAccount(deploy_account_tx) => {
@@ -61,20 +55,21 @@ pub fn calculate_transaction_hash_with_signature(
                 // Include signatures for DeployAccount transactions if the block number is greater than 61394
                 // (mainnet)
                 if include_signature {
                     let signature = deploy_account_tx.signature();
 
-                    H::compute_hash_on_elements(
-                        &signature.0.iter().map(|x| Felt252Wrapper::from(*x).into()).collect::<Vec<FieldElement>>(),
-                    )
+                    Pedersen::hash_array(&signature.0.iter().map(|x| Felt::from_bytes_be(&x.0)).collect::<Vec<_>>())
                 } else {
-                    H::compute_hash_on_elements(&[])
+                    Pedersen::hash_array(&[])
                 }
             }
-            Transaction::L1Handler(_) => H::compute_hash_on_elements(&[]),
-            _ => H::compute_hash_on_elements(&[]),
+            Transaction::L1Handler(_) => Pedersen::hash_array(&[]),
+            _ => Pedersen::hash_array(&[]),
         },
-        || Felt252Wrapper::from(transaction.compute_hash::<PedersenHasher>(chain_id, false, Some(block_number)).0).into(),
+        || {
+            Felt252Wrapper::from(transaction.compute_hash::<PedersenHasher>(chain_id, false, Some(block_number)).0)
+                .into()
+        },
     );
 
-    H::hash_elements(tx_hash, signature_hash)
+    Pedersen::hash(&tx_hash, &signature_hash)
 }
 
 /// Calculate the transaction commitment in memory using HashMapDb (which is more efficient for this
 /// use case).
 ///
@@ -88,12 +83,12 @@ pub fn calculate_transaction_hash_with_signature(
 /// # Returns
 ///
-/// The transaction commitment as `Felt252Wrapper`.
+/// The transaction commitment as `Felt`.
 pub fn memory_transaction_commitment(
     transactions: &[Transaction],
     chain_id: Felt252Wrapper,
     block_number: u64,
-) -> Result<(Felt252Wrapper, Vec<FieldElement>), String> {
+) -> Result<(Felt, Vec<Felt>), String> {
     // TODO @cchudant refacto/optimise this function
     let config = BonsaiStorageConfig::default();
     let bonsai_db = HashMapDb::<BasicId>::default();
@@ -104,13 +99,13 @@ pub fn memory_transaction_commitment(
     // transaction hashes are computed in parallel
     let txs = transactions
         .par_iter()
-        .map(|tx| calculate_transaction_hash_with_signature::<PedersenHasher>(tx, chain_id, block_number))
+        .map(|tx| calculate_transaction_hash_with_signature(tx, chain_id, block_number))
         .collect::<Vec<_>>();
 
     // once transaction hashes have finished computing, they are inserted into the local Bonsai db
     for (i, &tx_hash) in txs.iter().enumerate() {
         let key = BitVec::from_vec(i.to_be_bytes().to_vec());
-        let value = Felt::from(Felt252Wrapper::from(tx_hash));
+        let value = tx_hash;
         bonsai_storage.insert(identifier, key.as_bitslice(), &value).expect("Failed to insert into bonsai storage");
     }
 
@@ -120,5 +115,5 @@ pub fn memory_transaction_commitment(
     bonsai_storage.commit(id).expect("Failed to commit to bonsai storage");
 
     let root_hash = bonsai_storage.root_hash(identifier).expect("Failed to get root hash");
-    Ok((Felt252Wrapper::from(root_hash), txs))
+    Ok((root_hash, txs))
 }
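The transaction-trie leaf computed above is Pedersen(tx_hash, Pedersen::hash_array(signature)), with an empty `hash_array(&[])` when the signature is excluded. A sketch with placeholder values standing in for a real transaction:

```rust
use starknet_types_core::felt::Felt;
use starknet_types_core::hash::{Pedersen, StarkHash};

fn tx_leaf(tx_hash: Felt, signature: &[Felt], include_signature: bool) -> Felt {
    // signatures are hashed as an array; excluded signatures hash to hash_array(&[])
    let signature_hash =
        if include_signature { Pedersen::hash_array(signature) } else { Pedersen::hash_array(&[]) };
    Pedersen::hash(&tx_hash, &signature_hash)
}

fn main() {
    // placeholder transaction hash and (r, s) signature values
    let leaf = tx_leaf(Felt::from(0x1234u64), &[Felt::from(5u64), Felt::from(6u64)], true);
    println!("tx leaf: {leaf}");
}
```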
diff --git a/crates/client/sync/src/l2.rs b/crates/client/sync/src/l2.rs
index f4b900b22..e5efd5a34 100644
--- a/crates/client/sync/src/l2.rs
+++ b/crates/client/sync/src/l2.rs
@@ -14,16 +14,16 @@ use mc_db::DeoxysBackend;
 use mc_telemetry::{TelemetryHandle, VerbosityLevel};
 use mp_block::{BlockId, BlockTag, DeoxysBlock};
 use mp_felt::{trim_hash, FeltWrapper};
-use serde::Deserialize;
-use starknet_api::hash::{StarkFelt, StarkHash};
+use starknet_api::hash::StarkFelt;
 use starknet_core::types::StateUpdate;
 use starknet_providers::sequencer::models::StateUpdateWithBlock;
 use starknet_providers::{ProviderError, SequencerGatewayProvider};
+use starknet_types_core::felt::Felt;
 use tokio::sync::{mpsc, oneshot};
 use tokio::task::JoinSet;
 use tokio::time::Duration;
 
-use crate::commitments::lib::{build_commitment_state_diff, csd_calculate_state_root};
+use crate::commitments::{build_commitment_state_diff, csd_calculate_state_root};
 use crate::convert::convert_block;
 use crate::fetch::fetchers::L2BlockAndUpdates;
 use crate::fetch::l2_fetch_task;
@@ -59,11 +59,11 @@ pub enum L2SyncError {
 }
 
 /// Contains the latest Starknet verified state on L2
-#[derive(Debug, Clone, Deserialize)]
+#[derive(Debug, Clone)]
 pub struct L2StateUpdate {
     pub block_number: u64,
-    pub global_root: StarkHash,
-    pub block_hash: StarkHash,
+    pub global_root: Felt,
+    pub block_hash: Felt,
 }
 
 fn store_new_block(block: &DeoxysBlock) -> Result<(), DeoxysStorageError> {
@@ -108,7 +108,7 @@ async fn l2_verify_and_apply_task(
         })
         .await?;
 
-        if global_state_root != state_root {
+        if global_state_root.0 != state_root.to_bytes_be() {
             // TODO(fault tolerance): we should have a single rocksdb transaction for the whole l2 update.
             // let prev_block = block_n.checked_sub(1).expect("no block to revert to");
@@ -377,9 +377,9 @@ async fn update_sync_metrics(
 }
 
 /// Verify and update the L2 state according to the latest state update
-pub fn verify_l2(block_number: u64, state_update: &StateUpdate) -> anyhow::Result<StarkFelt> {
+pub fn verify_l2(block_number: u64, state_update: &StateUpdate) -> anyhow::Result<Felt> {
     let csd = build_commitment_state_diff(state_update);
     let state_root = csd_calculate_state_root(csd, block_number);
 
-    Ok(state_root.into())
+    Ok(state_root)
 }
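The root check rewritten above compares a `starknet_api` `StarkFelt` coming from L1 with a freshly computed `Felt` by comparing raw big-endian bytes, which avoids a conversion in either direction. A sketch of the same comparison, with a plain byte array standing in for the `StarkFelt` side:

```rust
use starknet_types_core::felt::Felt;

fn roots_match(global_state_root_bytes: [u8; 32], state_root: Felt) -> bool {
    // Felt::to_bytes_be yields the canonical 32-byte big-endian encoding
    global_state_root_bytes == state_root.to_bytes_be()
}

fn main() {
    let state_root = Felt::from(123u64);
    let from_l1 = state_root.to_bytes_be(); // pretend this arrived from the L1 core contract
    assert!(roots_match(from_l1, state_root));
}
```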
// let prev_block = block_n.checked_sub(1).expect("no block to revert to"); @@ -377,9 +377,9 @@ async fn update_sync_metrics( } /// Verify and update the L2 state according to the latest state update -pub fn verify_l2(block_number: u64, state_update: &StateUpdate) -> anyhow::Result { +pub fn verify_l2(block_number: u64, state_update: &StateUpdate) -> anyhow::Result { let csd = build_commitment_state_diff(state_update); let state_root = csd_calculate_state_root(csd, block_number); - Ok(state_root.into()) + Ok(state_root) } diff --git a/crates/client/sync/src/reorgs/lib.rs b/crates/client/sync/src/reorgs/lib.rs deleted file mode 100644 index 19fa2cab7..000000000 --- a/crates/client/sync/src/reorgs/lib.rs +++ /dev/null @@ -1,34 +0,0 @@ -use mc_db::DeoxysBackend; -use starknet_providers::sequencer::models::Block as StarknetBlock; - -/// Check for a reorg on Starknet and fix the current state if detected. -/// -/// On Starknet with the current system relying on a single sequencer it's rare to detect a reorg, -/// but if the L1 reorgs we must handle it the following way: -/// -/// 1. The last fetched block parent hash is not equal to the last synced block by Deoxys: a reorg -/// is detected. -/// 2. We remove the last synced substrate digest and the associated classes/state_update we stored -/// until we reach the last common ancestor. -/// -/// ### Arguments -/// -/// * `block` - The last fetched block from the sequencer (before beeing converted). -/// -/// ### Returns -/// This function will return a `Bool` returning `true` if a reorg was detected and `false` if not. -pub async fn reorg(block: StarknetBlock) -> bool { - let last_synced_block_hash = DeoxysBackend::meta().get_latest_block_hash_and_number().unwrap().0; - if block.parent_block_hash != last_synced_block_hash { - let mut new_lsbh = last_synced_block_hash; - while block.parent_block_hash != new_lsbh { - // 1. Remove the last synced block in the digest - // 2. Remove all the downloaded stuff from the state updates - new_lsbh = DeoxysBackend::meta().get_latest_block_hash_and_number().unwrap().0; - } - // 3. Revert the state commitment tries to the correct block number - true - } else { - false - } -} diff --git a/crates/client/sync/src/reorgs/mod.rs b/crates/client/sync/src/reorgs/mod.rs index 965f28e93..19fa2cab7 100644 --- a/crates/client/sync/src/reorgs/mod.rs +++ b/crates/client/sync/src/reorgs/mod.rs @@ -1 +1,34 @@ -pub mod lib; +use mc_db::DeoxysBackend; +use starknet_providers::sequencer::models::Block as StarknetBlock; + +/// Check for a reorg on Starknet and fix the current state if detected. +/// +/// On Starknet with the current system relying on a single sequencer it's rare to detect a reorg, +/// but if the L1 reorgs we must handle it the following way: +/// +/// 1. The last fetched block parent hash is not equal to the last synced block by Deoxys: a reorg +/// is detected. +/// 2. We remove the last synced substrate digest and the associated classes/state_update we stored +/// until we reach the last common ancestor. +/// +/// ### Arguments +/// +/// * `block` - The last fetched block from the sequencer (before beeing converted). +/// +/// ### Returns +/// This function will return a `Bool` returning `true` if a reorg was detected and `false` if not. 
diff --git a/crates/client/sync/src/utils/convert.rs b/crates/client/sync/src/utils/convert.rs
index fabe11399..d8f730635 100644
--- a/crates/client/sync/src/utils/convert.rs
+++ b/crates/client/sync/src/utils/convert.rs
@@ -22,8 +22,9 @@ use starknet_providers::sequencer::models::state_update::{
     DeclaredContract, DeployedContract, StateDiff as StateDiffProvider, StorageDiff as StorageDiffProvider,
 };
 use starknet_providers::sequencer::models::{self as p, StateUpdate as StateUpdateProvider};
+use starknet_types_core::felt::Felt;
 
-use crate::commitments::lib::calculate_tx_and_event_commitments;
+use crate::commitments::calculate_tx_and_event_commitments;
 use crate::l2::L2SyncError;
 
 /// Compute heavy, this should only be called in a rayon ctx
@@ -43,6 +44,11 @@ pub fn convert_block(block: p::Block, chain_id: StarkFelt) -> Result<DeoxysBloc
 
+    let ((transaction_commitment, txs_hashes), event_commitment) =
+        commitments(&transactions, &events, block_number, chain_id);
+    let tx_hashes: Vec<StarkFelt> = txs_hashes.into_iter().map(Felt252Wrapper::from).map(Into::into).collect();
+
     let protocol_version = starknet_version(&block.starknet_version);
     let l1_gas_price = resource_price(block.l1_gas_price, block.l1_data_gas_price);
     let l1_da_mode = l1_da_mode(block.l1_da_mode);
@@ -80,7 +86,7 @@ pub fn convert_block(block: p::Block, chain_id: StarkFelt) -> Result<DeoxysBloc
 fn commitments(
     transactions: &[Transaction],
     events: &[Event],
     block_number: u64,
     chain_id: StarkFelt,
-) -> ((StarkFelt, Vec<StarkFelt>), StarkFelt) {
+) -> ((Felt, Vec<Felt>), Felt) {
     let ((commitment_tx, txs_hashes), commitment_event) =
         calculate_tx_and_event_commitments(transactions, events, chain_id.into(), block_number);
 
-    ((commitment_tx.into(), txs_hashes), commitment_event.into())
+    ((commitment_tx, txs_hashes), commitment_event)
 }
 
 fn felt(field_element: starknet_ff::FieldElement) -> starknet_api::hash::StarkFelt {
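`convert_block` still has to hand `StarkFelt`s to storage, so the new `Felt`-returning commitments get bridged back at the boundary (the `Felt252Wrapper` round-trip in the hunk above). Assuming `starknet_api`'s fallible `StarkFelt::new` constructor, the direct byte-level conversion looks like this sketch:

```rust
use starknet_api::hash::StarkFelt;
use starknet_types_core::felt::Felt;

fn to_stark_felt(felt: Felt) -> StarkFelt {
    // a Felt is always a canonical field element, so its 32 bytes are < P
    StarkFelt::new(felt.to_bytes_be()).expect("felt out of range")
}

fn main() {
    let commitment = Felt::from(42u64); // placeholder commitment value
    println!("{:?}", to_stark_felt(commitment));
}
```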
diff --git a/crates/client/sync/src/utils/utility.rs b/crates/client/sync/src/utils/utility.rs
index f522ac526..e415458f2 100644
--- a/crates/client/sync/src/utils/utility.rs
+++ b/crates/client/sync/src/utils/utility.rs
@@ -1,18 +1,13 @@
 //! Utility functions for Deoxys.
 
-use std::thread::sleep;
-use std::time::Duration;
-
 use anyhow::{bail, Context};
 use ethers::types::{I256, U256};
 use rand::seq::SliceRandom;
 use rand::thread_rng;
-use reqwest::header;
-use serde_json::{json, Value};
+use serde_json::Value;
 use starknet_api::hash::StarkFelt;
 
 use crate::l1::{L1StateUpdate, LogStateUpdate};
-use crate::l2::L2StateUpdate;
 
 // static CONFIG: OnceCell = OnceCell::new();
 
@@ -37,65 +32,6 @@ use crate::l1::{L1StateUpdate, LogStateUpdate};
 //     CONFIG.get().expect("CONFIG not initialized").feeder_gateway.clone()
 // }
 
-// TODO: secure the auto calls here
-
-pub async fn get_state_update_at(rpc_port: u16, block_number: u64) -> anyhow::Result<L2StateUpdate> {
-    let client = reqwest::Client::new();
-    let url = format!("http://localhost:{}", rpc_port);
-
-    let request = json!({
-        "id": 1,
-        "jsonrpc": "2.0",
-        "method": "starknet_getStateUpdate",
-        "params": [{ "block_number": block_number }]
-    });
-    let payload = serde_json::to_vec(&request)?;
-
-    const MAX_ATTEMPTS: u8 = 3;
-    const RETRY_DELAY: Duration = Duration::from_secs(5);
-    let mut attempts = 0;
-
-    while attempts < MAX_ATTEMPTS {
-        let response = client
-            .post(&url)
-            .header(header::CONTENT_TYPE, "application/json")
-            .header(header::ACCEPT, "application/json")
-            .body(payload.clone())
-            .send()
-            .await;
-
-        match response {
-            Ok(response) => {
-                let json_response = response.json::<Value>().await;
-
-                match json_response {
-                    Ok(json_response) => {
-                        if let Some(result) = json_response.get("result") {
-                            let state_update: L2StateUpdate = serde_json::from_value(result.clone())?;
-                            return Ok(state_update);
-                        } else {
-                            eprintln!("No result found in response");
-                            attempts += 1;
-                            sleep(RETRY_DELAY);
-                        }
-                    }
-                    Err(e) => {
-                        eprintln!("Failed to parse response as JSON: {}", e);
-                        attempts += 1;
-                        sleep(RETRY_DELAY);
-                    }
-                }
-            }
-            Err(e) => {
-                eprintln!("Request failed: {}, retrying...", e);
-                attempts += 1;
-                sleep(RETRY_DELAY);
-            }
-        }
-    }
-
-    bail!("Maximum retries exceeded")
-}
-
 /// Returns a random Pokémon name.
 pub async fn get_random_pokemon_name() -> Result<String, Box<dyn std::error::Error>> {
     let res = reqwest::get("https://pokeapi.co/api/v2/pokemon/?limit=1000").await?;