From c547b5f11a7e932e91a0c9187fda47a094dd80f9 Mon Sep 17 00:00:00 2001 From: jbcaron Date: Wed, 20 Mar 2024 14:17:00 +0100 Subject: [PATCH 1/6] feat: :sparkles: add parameters --- crates/client/sync/src/l2.rs | 36 +++++++++++++++++++-------------- crates/node/src/commands/run.rs | 24 +++++++++++++++++----- 2 files changed, 40 insertions(+), 20 deletions(-) diff --git a/crates/client/sync/src/l2.rs b/crates/client/sync/src/l2.rs index 1a9b1e425..5ccf3a58f 100644 --- a/crates/client/sync/src/l2.rs +++ b/crates/client/sync/src/l2.rs @@ -98,6 +98,8 @@ pub struct FetchConfig { pub sound: bool, /// The L1 contract core address pub l1_core_address: H160, + /// Whether to check the root of the state update + pub verify: bool, } /// The configuration of the senders responsible for sending blocks and state @@ -216,21 +218,25 @@ pub async fn sync( let block_hash = block_hash_substrate(client.as_ref(), block_n - 1); let state_update = { - let overrides = Arc::clone(overrides); - let bonsai_contract = Arc::clone(bonsai_contract); - let bonsai_contract_storage = Arc::clone(bonsai_contract_storage); - let bonsai_class = Arc::clone(bonsai_class); - let state_update = Arc::new(state_update); - let state_update_1 = Arc::clone(&state_update); - - tokio::task::spawn_blocking(move || { - verify_l2(block_n, &state_update, &overrides, &bonsai_contract, &bonsai_contract_storage, &bonsai_class, block_hash) - .expect("verifying block"); - }) - .await - .expect("verification task panicked"); - - Arc::try_unwrap(state_update_1).expect("arc should not be aliased") + if fetch_config.verify { + let overrides = Arc::clone(overrides); + let bonsai_contract = Arc::clone(bonsai_contract); + let bonsai_contract_storage = Arc::clone(bonsai_contract_storage); + let bonsai_class = Arc::clone(bonsai_class); + let state_update = Arc::new(state_update); + let state_update_1 = Arc::clone(&state_update); + + tokio::task::spawn_blocking(move || { + verify_l2(block_n, &state_update, &overrides, &bonsai_contract, &bonsai_contract_storage, &bonsai_class, block_hash) + .expect("verifying block"); + }) + .await + .expect("verification task panicked"); + + Arc::try_unwrap(state_update_1).expect("arc should not be aliased") + } else { + state_update + } }; tokio::join!( diff --git a/crates/node/src/commands/run.rs b/crates/node/src/commands/run.rs index 954e97c78..8bd313590 100644 --- a/crates/node/src/commands/run.rs +++ b/crates/node/src/commands/run.rs @@ -119,7 +119,15 @@ impl NetworkType { let feeder_gateway = format!("{uri}/feeder_gateway").parse().unwrap(); let l1_core_address = self.l1_core_address(); - mc_sync::FetchConfig { gateway, feeder_gateway, chain_id, workers: 5, sound: false, l1_core_address } + mc_sync::FetchConfig { + gateway, + feeder_gateway, + chain_id, + workers: 5, + sound: false, + l1_core_address, + verify: true, + } } } @@ -159,9 +167,14 @@ pub struct ExtendedRunCmd { /// This wrap a specific deoxys environment for a node quick start. 
#[clap(long)] pub deoxys: bool, + /// Configuration for L1 Messages (Syncing) Worker #[clap(flatten)] pub l1_messages_worker: L1Messages, + + /// Disable root verification + #[clap(long)] + pub disable_root: bool, } pub fn run_node(mut cli: Cli) -> Result<()> { @@ -186,6 +199,7 @@ pub fn run_node(mut cli: Cli) -> Result<()> { let cache = cli.run.cache; let mut fetch_block_config = cli.run.network.block_fetch_config(); fetch_block_config.sound = cli.run.sound; + fetch_block_config.verify = !cli.run.disable_root; update_config(&fetch_block_config); log::debug!("Using fetch block config: {:?}", fetch_block_config); @@ -224,17 +238,17 @@ fn override_dev_environment(cmd: &mut ExtendedRunCmd) { fn deoxys_environment(cmd: &mut ExtendedRunCmd) { // Set the blockchain network to 'starknet' cmd.base.shared_params.chain = Some("starknet".to_string()); - cmd.base.shared_params.base_path = Some(PathBuf::from("/tmp/deoxys")); + cmd.base.shared_params.base_path.get_or_insert_with(|| PathBuf::from("/tmp/deoxys")); // Assign a random pokemon name at each startup - cmd.base.name = Some( + cmd.base.name.get_or_insert_with(|| { tokio::runtime::Runtime::new().unwrap().block_on(mc_sync::utility::get_random_pokemon_name()).unwrap_or_else( |e| { log::warn!("Failed to get random pokemon name: {}", e); "gimmighoul".to_string() }, - ), - ); + ) + }); // Define telemetry endpoints at starknodes.com cmd.base.telemetry_params.telemetry_endpoints = vec![("wss://starknodes.com/submit/".to_string(), 0)]; From 01aaa7814c2d5f62b47513b69d1d7c743347dd49 Mon Sep 17 00:00:00 2001 From: antiyro Date: Thu, 21 Mar 2024 17:25:00 +0100 Subject: [PATCH 2/6] added optimizations removed authority mode and grandpa --- crates/client/rpc/src/read/lib.rs | 0 crates/node/src/commands/run.rs | 5 ++-- crates/node/src/service.rs | 41 ++++++++++++++++--------------- 3 files changed, 23 insertions(+), 23 deletions(-) create mode 100644 crates/client/rpc/src/read/lib.rs diff --git a/crates/client/rpc/src/read/lib.rs b/crates/client/rpc/src/read/lib.rs new file mode 100644 index 000000000..e69de29bb diff --git a/crates/node/src/commands/run.rs b/crates/node/src/commands/run.rs index 8bd313590..37b0ed705 100644 --- a/crates/node/src/commands/run.rs +++ b/crates/node/src/commands/run.rs @@ -253,8 +253,7 @@ fn deoxys_environment(cmd: &mut ExtendedRunCmd) { // Define telemetry endpoints at starknodes.com cmd.base.telemetry_params.telemetry_endpoints = vec![("wss://starknodes.com/submit/".to_string(), 0)]; - // Enables authoring and manual sealing for custom block production - cmd.base.force_authoring = true; - cmd.base.alice = true; + // Enables manual sealing for custom block production + cmd.base.no_grandpa = true; cmd.sealing = Some(Sealing::Manual); } diff --git a/crates/node/src/service.rs b/crates/node/src/service.rs index c1065309c..1386913b1 100644 --- a/crates/node/src/service.rs +++ b/crates/node/src/service.rs @@ -439,29 +439,30 @@ pub fn new_full( Some("madara"), starknet_sync_worker::sync(fetch_config, sender_config, rpc_port, l1_url, madara_backend, Arc::clone(&client)), ); + + if !sealing.is_default() { + run_manual_seal_authorship( + block_receiver, + state_update_receiver, + class_receiver, + sealing, + client, + transaction_pool, + select_chain, + block_import, + &task_manager, + prometheus_registry.as_ref(), + commands_stream, + telemetry, + )?; + + network_starter.start_network(); + return Ok(task_manager); + } + if role.is_authority() { // manual-seal authorship - if !sealing.is_default() { - run_manual_seal_authorship( - 
block_receiver, - state_update_receiver, - class_receiver, - sealing, - client, - transaction_pool, - select_chain, - block_import, - &task_manager, - prometheus_registry.as_ref(), - commands_stream, - telemetry, - )?; - - network_starter.start_network(); - - return Ok(task_manager); - } let proposer_factory = ProposerFactory::new( task_manager.spawn_handle(), From 97619e69d2cd0a59d0e4637fdf6079721bf211ba Mon Sep 17 00:00:00 2001 From: antiyro Date: Thu, 21 Mar 2024 17:31:03 +0100 Subject: [PATCH 3/6] moved manual seal authorship declaration --- crates/node/src/service.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/crates/node/src/service.rs b/crates/node/src/service.rs index 1386913b1..0b0b1750a 100644 --- a/crates/node/src/service.rs +++ b/crates/node/src/service.rs @@ -440,6 +440,7 @@ pub fn new_full( starknet_sync_worker::sync(fetch_config, sender_config, rpc_port, l1_url, madara_backend, Arc::clone(&client)), ); + // manual-seal authorship if !sealing.is_default() { run_manual_seal_authorship( block_receiver, @@ -462,8 +463,6 @@ pub fn new_full( } if role.is_authority() { - // manual-seal authorship - let proposer_factory = ProposerFactory::new( task_manager.spawn_handle(), client.clone(), From 34f86196026209226c4a920ecd2d30b25e85b086 Mon Sep 17 00:00:00 2001 From: antiyro Date: Fri, 22 Mar 2024 10:29:32 +0100 Subject: [PATCH 4/6] cleaned l2 sync crate --- crates/client/sync/src/fetch/fetch.rs | 238 ++++++++++++++++++++ crates/client/sync/src/fetch/mod.rs | 1 + crates/client/sync/src/l2.rs | 278 ++---------------------- crates/client/sync/src/lib.rs | 5 +- crates/client/sync/src/utils/utility.rs | 26 ++- crates/node/src/commands/run.rs | 6 +- crates/node/src/service.rs | 3 +- 7 files changed, 294 insertions(+), 263 deletions(-) create mode 100644 crates/client/sync/src/fetch/fetch.rs create mode 100644 crates/client/sync/src/fetch/mod.rs diff --git a/crates/client/sync/src/fetch/fetch.rs b/crates/client/sync/src/fetch/fetch.rs new file mode 100644 index 000000000..a9cd19c0a --- /dev/null +++ b/crates/client/sync/src/fetch/fetch.rs @@ -0,0 +1,238 @@ +//! Contains the code required to fetch data from the network efficiently. +use std::sync::Arc; + +use mc_storage::OverrideHandle; +use mp_contract::class::{ContractClassData, ContractClassWrapper}; +use mp_felt::Felt252Wrapper; +use mp_storage::StarknetStorageSchemaVersion; +use sp_blockchain::HeaderBackend; +use sp_core::{H160, H256}; +use sp_runtime::generic::{Block, Header}; +use sp_runtime::traits::{BlakeTwo256, Block as BlockT}; +use sp_runtime::OpaqueExtrinsic; +use starknet_api::api_core::ClassHash; +use starknet_core::types::BlockId as BlockIdCore; +use starknet_ff::FieldElement; +use starknet_providers::sequencer::models::state_update::{DeclaredContract, DeployedContract}; +use starknet_providers::sequencer::models as p; +use starknet_providers::sequencer::models::{BlockId, StateUpdate}; +use starknet_providers::{Provider, ProviderError, SequencerGatewayProvider}; +use tokio::task::JoinSet; +use tokio::time::Duration; +use itertools::Itertools; +use url::Url; + +use crate::l2::L2SyncError; +use crate::utility::{block_hash_deoxys, block_hash_substrate}; + +/// The configuration of the worker responsible for fetching new blocks and state updates from the +/// feeder. +#[derive(Clone, Debug)] +pub struct FetchConfig { + /// The URL of the sequencer gateway. + pub gateway: Url, + /// The URL of the feeder gateway. + pub feeder_gateway: Url, + /// The ID of the chain served by the sequencer gateway. 
+    pub chain_id: starknet_ff::FieldElement,
+    /// The number of tasks spawned to fetch blocks and state updates.
+    pub workers: u32,
+    /// Whether to play a sound when a new block is fetched.
+    pub sound: bool,
+    /// The L1 contract core address
+    pub l1_core_address: H160,
+    /// Whether to check the root of the state update
+    pub verify: bool,
+}
+
+pub async fn fetch_block(client: &SequencerGatewayProvider, block_number: u64) -> Result<p::Block, L2SyncError> {
+    let block = client.get_block(BlockId::Number(block_number)).await?;
+
+    Ok(block)
+}
+
+pub async fn fetch_block_and_updates<B, C>(
+    block_n: u64,
+    provider: &SequencerGatewayProvider,
+    overrides: &Arc<OverrideHandle<Block<Header<u32, BlakeTwo256>, OpaqueExtrinsic>>>,
+    client: &C,
+) -> Result<(p::Block, StateUpdate, Vec<ContractClassData>), L2SyncError>
+where
+    B: BlockT,
+    C: HeaderBackend<B>,
+{
+    const MAX_RETRY: u32 = 15;
+    let mut attempt = 0;
+    let base_delay = Duration::from_secs(1);
+
+    loop {
+        log::debug!("fetch_block_and_updates {}", block_n);
+        let block = fetch_block(provider, block_n);
+        let state_update = fetch_state_and_class_update(provider, block_n, overrides, client);
+        let (block, state_update) = tokio::join!(block, state_update);
+        log::debug!("fetch_block_and_updates: done {block_n}");
+
+        match block.as_ref().err().or(state_update.as_ref().err()) {
+            Some(L2SyncError::Provider(ProviderError::RateLimited)) => {
+                log::debug!("The fetching process has been rate limited, retrying in {:?} seconds", base_delay);
+                attempt += 1;
+                if attempt >= MAX_RETRY {
+                    return Err(L2SyncError::FetchRetryLimit);
+                }
+                // Exponential backoff with a cap on the delay
+                let delay = base_delay * 2_u32.pow(attempt - 1).min(6); // Cap to prevent overly long delays
+                tokio::time::sleep(delay).await;
+            }
+            _ => {
+                let (block, (state_update, class_update)) = (block?, state_update?);
+                return Ok((block, state_update, class_update));
+            }
+        }
+    }
+}
+
+pub async fn fetch_apply_genesis_block(config: FetchConfig) -> Result<mp_block::Block, String> {
+    let client = SequencerGatewayProvider::new(config.gateway.clone(), config.feeder_gateway.clone(), config.chain_id);
+    let block = client.get_block(BlockId::Number(0)).await.map_err(|e| format!("failed to get block: {e}"))?;
+
+    Ok(crate::convert::block(block).await)
+}
+
+#[allow(clippy::too_many_arguments)]
+async fn fetch_state_and_class_update<B, C>(
+    provider: &SequencerGatewayProvider,
+    block_number: u64,
+    overrides: &Arc<OverrideHandle<Block<Header<u32, BlakeTwo256>, OpaqueExtrinsic>>>,
+    client: &C,
+) -> Result<(StateUpdate, Vec<ContractClassData>), L2SyncError>
+where
+    B: BlockT,
+    C: HeaderBackend<B>,
+{
+    // Children tasks need StateUpdate as an Arc, because of task spawn 'static requirement
+    // We make an Arc, and then unwrap the StateUpdate out of the Arc
+    let state_update = Arc::new(fetch_state_update(provider, block_number).await?);
+    let class_update = fetch_class_update(provider, &state_update, overrides, block_number, client).await?;
+    let state_update = Arc::try_unwrap(state_update).expect("arc should not be aliased");
+
+    Ok((state_update, class_update))
+}
+
+/// retrieves state update from Starknet sequencer
+async fn fetch_state_update(
+    provider: &SequencerGatewayProvider,
+    block_number: u64,
+) -> Result<StateUpdate, L2SyncError> {
+    let state_update = provider.get_state_update(BlockId::Number(block_number)).await?;
+
+    Ok(state_update)
+}
+
+/// retrieves class updates from Starknet sequencer
+async fn fetch_class_update<B, C>(
+    provider: &SequencerGatewayProvider,
+    state_update: &Arc<StateUpdate>,
+    overrides: &Arc<OverrideHandle<Block<Header<u32, BlakeTwo256>, OpaqueExtrinsic>>>,
+    block_number: u64,
+    client: &C,
+) -> Result<Vec<ContractClassData>, L2SyncError>
+where
+    B: BlockT,
+    C: HeaderBackend<B>,
+{
+    // defaults to downloading ALL classes if a substrate block hash could not be determined
+    let missing_classes = match block_hash_substrate(client, block_number) {
+        Some(block_hash_substrate) => fetch_missing_classes(state_update, overrides, block_hash_substrate),
+        None => aggregate_classes(state_update),
+    };
+
+    let arc_provider = Arc::new(provider.clone());
+    let mut task_set = missing_classes.into_iter().fold(JoinSet::new(), |mut set, class_hash| {
+        let provider = Arc::clone(&arc_provider);
+        let state_update = Arc::clone(state_update);
+        let class_hash = *class_hash;
+        set.spawn(async move { fetch_class(class_hash, block_hash_deoxys(&state_update), &provider).await });
+        set
+    });
+
+    // WARNING: all class downloads will abort if even a single class fails to download.
+    let mut classes = vec![];
+    while let Some(res) = task_set.join_next().await {
+        classes.push(res.expect("Join error")?);
+        // No need to `abort_all()` the `task_set` in case of errors, as dropping the `task_set`
+        // will abort all the tasks.
+    }
+
+    Ok(classes)
+}
+
+/// Downloads a class definition from the Starknet sequencer. Note that because
+/// of the current type hell this needs to be converted into a blockifier equivalent
+async fn fetch_class(
+    class_hash: FieldElement,
+    block_hash: FieldElement,
+    provider: &SequencerGatewayProvider,
+) -> Result<ContractClassData, L2SyncError> {
+    // log::info!("💾 Downloading class {class_hash:#x}");
+    let core_class = provider.get_class(BlockIdCore::Hash(block_hash), class_hash).await?;
+
+    // Core classes have to be converted into Blockifier classes to gain support
+    // for Substrate [`Encode`] and [`Decode`] traits
+    Ok(ContractClassData {
+        // TODO: find a less roundabout way of converting from a Felt252Wrapper
+        hash: ClassHash(Felt252Wrapper::from(class_hash).into()),
+        // TODO: remove this expect when ContractClassWrapper::try_from does proper error handling using
+        // thiserror
+        contract_class: ContractClassWrapper::try_from(core_class).expect("converting contract class"),
+    })
+}
+
+/// Filters out class declarations in the Starknet sequencer state update
+/// and retains only those which are not stored in the local Substrate db.
+fn fetch_missing_classes<'a>(
+    state_update: &'a StateUpdate,
+    overrides: &Arc<OverrideHandle<Block<Header<u32, BlakeTwo256>, OpaqueExtrinsic>>>,
+    block_hash_substrate: H256,
+) -> Vec<&'a FieldElement> {
+    aggregate_classes(state_update)
+        .into_iter()
+        .filter(|class_hash| is_missing_class(overrides, block_hash_substrate, Felt252Wrapper::from(**class_hash)))
+        .collect()
+}
+
+/// Retrieves all class hashes from state update. This includes newly deployed
+/// contract class hashes, Sierra class hashes and Cairo class hashes
+fn aggregate_classes(state_update: &StateUpdate) -> Vec<&FieldElement> {
+    std::iter::empty()
+        .chain(
+            state_update
+                .state_diff
+                .deployed_contracts
+                .iter()
+                .map(|DeployedContract { address: _, class_hash }| class_hash),
+        )
+        .chain(
+            state_update
+                .state_diff
+                .declared_classes
+                .iter()
+                .map(|DeclaredContract { class_hash, compiled_class_hash: _ }| class_hash),
+        )
+        .unique()
+        .collect()
+}
+
+/// Check if a class is stored in the local Substrate db.
+///
+/// Since a change in class definition will result in a change in class hash,
+/// this means we only need to check for class hashes in the db.
+fn is_missing_class( + overrides: &Arc, OpaqueExtrinsic>>>, + block_hash_substrate: H256, + class_hash: Felt252Wrapper, +) -> bool { + overrides + .for_schema_version(&StarknetStorageSchemaVersion::Undefined) + .contract_class_by_class_hash(block_hash_substrate, ClassHash::from(class_hash)) + .is_none() +} \ No newline at end of file diff --git a/crates/client/sync/src/fetch/mod.rs b/crates/client/sync/src/fetch/mod.rs new file mode 100644 index 000000000..c798af5b0 --- /dev/null +++ b/crates/client/sync/src/fetch/mod.rs @@ -0,0 +1 @@ +pub mod fetch; \ No newline at end of file diff --git a/crates/client/sync/src/l2.rs b/crates/client/sync/src/l2.rs index 3fd1f5f4f..6f1a2e6ab 100644 --- a/crates/client/sync/src/l2.rs +++ b/crates/client/sync/src/l2.rs @@ -1,44 +1,37 @@ -//! Contains the code required to fetch data from the feeder efficiently. +//! Contains the code required to sync data from the feeder efficiently. use std::pin::pin; use std::str::FromStr; use std::sync::{Arc, Mutex, RwLock}; -use bitvec::order::Msb0; -use bitvec::view::AsBits; use bonsai_trie::id::BasicId; use bonsai_trie::BonsaiStorage; use futures::{stream, StreamExt}; -use itertools::Itertools; use lazy_static::lazy_static; use mc_db::bonsai_db::BonsaiDb; use mc_storage::OverrideHandle; use mp_block::state_update::StateUpdateWrapper; -use mp_contract::class::{ClassUpdateWrapper, ContractClassData, ContractClassWrapper}; +use mp_contract::class::ClassUpdateWrapper; use mp_felt::Felt252Wrapper; -use mp_storage::StarknetStorageSchemaVersion; -use reqwest::Url; use serde::Deserialize; use sp_blockchain::HeaderBackend; -use sp_core::{H160, H256}; +use sp_core::H256; use sp_runtime::generic::{Block, Header}; -use sp_runtime::traits::{BlakeTwo256, Block as BlockT, UniqueSaturatedInto}; +use sp_runtime::traits::{BlakeTwo256, Block as BlockT}; use sp_runtime::OpaqueExtrinsic; -use starknet_api::api_core::ClassHash; use starknet_api::hash::StarkHash; -use starknet_core::types::{BlockId as BlockIdCore, PendingStateUpdate, StarknetError}; +use starknet_core::types::{PendingStateUpdate, StarknetError}; use starknet_ff::FieldElement; -use starknet_providers::sequencer::models as p; -use starknet_providers::sequencer::models::state_update::{DeclaredContract, DeployedContract}; use starknet_providers::sequencer::models::{BlockId, StateUpdate}; -use starknet_providers::{Provider, ProviderError, SequencerGatewayProvider}; +use starknet_providers::{ProviderError, SequencerGatewayProvider}; use starknet_types_core::hash::{Pedersen, Poseidon}; use thiserror::Error; use tokio::sync::mpsc; use tokio::sync::mpsc::Sender; -use tokio::task::JoinSet; use tokio::time::{Duration, Instant}; use crate::commitments::lib::{build_commitment_state_diff, update_state_root}; +use crate::fetch::fetch::{fetch_block_and_updates, FetchConfig}; +use crate::utility::block_hash_substrate; use crate::CommandSink; // TODO: add more error variants, which are more explicit @@ -50,7 +43,7 @@ pub enum L2SyncError { FetchRetryLimit, } -/// Contains the Starknet verified state on L2 +/// Contains the latest Starknet verified state on L2 #[derive(Debug, Clone, Deserialize)] pub struct L2StateUpdate { pub block_number: u64, @@ -82,24 +75,18 @@ lazy_static! { static ref STARKNET_PENDING_STATE_UPDATE: RwLock> = RwLock::new(None); } -/// The configuration of the worker responsible for fetching new blocks and state updates from the -/// feeder. -#[derive(Clone, Debug)] -pub struct FetchConfig { - /// The URL of the sequencer gateway. 
- pub gateway: Url, - /// The URL of the feeder gateway. - pub feeder_gateway: Url, - /// The ID of the chain served by the sequencer gateway. - pub chain_id: starknet_ff::FieldElement, - /// The number of tasks spawned to fetch blocks and state updates. - pub workers: u32, - /// Whether to play a sound when a new block is fetched. - pub sound: bool, - /// The L1 contract core address - pub l1_core_address: H160, - /// Whether to check the root of the state update - pub verify: bool, +pub fn get_highest_block_hash_and_number() -> (FieldElement, u64) { + *STARKNET_HIGHEST_BLOCK_HASH_AND_NUMBER + .read() + .expect("Failed to acquire read lock on STARKNET_HIGHEST_BLOCK_HASH_AND_NUMBER") +} + +pub fn get_pending_block() -> Option { + STARKNET_PENDING_BLOCK.read().expect("Failed to acquire read lock on STARKNET_PENDING_BLOCK").clone() +} + +pub fn get_pending_state_update() -> Option { + STARKNET_PENDING_STATE_UPDATE.read().expect("Failed to acquire read lock on STARKNET_PENDING_BLOCK").clone() } /// The configuration of the senders responsible for sending blocks and state @@ -118,39 +105,6 @@ pub struct SenderConfig { pub overrides: Arc, OpaqueExtrinsic>>>, } -async fn fetch_block_and_updates( - block_n: u64, - provider: &SequencerGatewayProvider, - overrides: &Arc, OpaqueExtrinsic>>>, - client: &C, -) -> Result<(p::Block, StateUpdate, Vec), L2SyncError> -where - B: BlockT, - C: HeaderBackend, -{ - // retry loop - const MAX_RETRY: usize = 15; - for _ in 0..MAX_RETRY { - log::debug!("fetch_block_and_updates {}", block_n); - let block = fetch_block(provider, block_n); - let state_update = fetch_state_and_class_update(provider, block_n, overrides, client); - let (block, state_update) = tokio::join!(block, state_update); - log::debug!("fetch_block_and_updates: done {block_n}"); - - if matches!( - block.as_ref().err().or(state_update.as_ref().err()), - Some(L2SyncError::Provider(ProviderError::RateLimited)) - ) { - continue; // retry api call - } - let (block, (state_update, class_update)) = (block?, state_update?); - - return Ok((block, state_update, class_update)); - } - - Err(L2SyncError::FetchRetryLimit) -} - /// Spawns workers to fetch blocks and state updates from the feeder. pub async fn sync( mut sender_config: SenderConfig, @@ -270,178 +224,6 @@ pub async fn sync( log::debug!("L2 sync finished :)"); } -async fn fetch_block(client: &SequencerGatewayProvider, block_number: u64) -> Result { - let block = client.get_block(BlockId::Number(block_number)).await?; - - Ok(block) -} - -// FIXME: This is an artefact of an older version of the code when this was used to retrieve the -// head of the chain during initialization, but is since no longer used. 
- -pub async fn fetch_apply_genesis_block(config: FetchConfig) -> Result { - let client = SequencerGatewayProvider::new(config.gateway.clone(), config.feeder_gateway.clone(), config.chain_id); - let block = client.get_block(BlockId::Number(0)).await.map_err(|e| format!("failed to get block: {e}"))?; - - Ok(crate::convert::block(block).await) -} - -#[allow(clippy::too_many_arguments)] -async fn fetch_state_and_class_update( - provider: &SequencerGatewayProvider, - block_number: u64, - overrides: &Arc, OpaqueExtrinsic>>>, - client: &C, -) -> Result<(StateUpdate, Vec), L2SyncError> -where - B: BlockT, - C: HeaderBackend, -{ - // Children tasks need StateUpdate as an Arc, because of task spawn 'static requirement - // We make an Arc, and then unwrap the StateUpdate out of the Arc - let state_update = Arc::new(fetch_state_update(provider, block_number).await?); - let class_update = fetch_class_update(provider, &state_update, overrides, block_number, client).await?; - let state_update = Arc::try_unwrap(state_update).expect("arc should not be aliased"); - - Ok((state_update, class_update)) -} - -/// retrieves state update from Starknet sequencer -async fn fetch_state_update( - provider: &SequencerGatewayProvider, - block_number: u64, -) -> Result { - let state_update = provider.get_state_update(BlockId::Number(block_number)).await?; - - Ok(state_update) -} - -/// retrieves class updates from Starknet sequencer -async fn fetch_class_update( - provider: &SequencerGatewayProvider, - state_update: &Arc, - overrides: &Arc, OpaqueExtrinsic>>>, - block_number: u64, - client: &C, -) -> Result, L2SyncError> -where - B: BlockT, - C: HeaderBackend, -{ - // defaults to downloading ALL classes if a substrate block hash could not be determined - let missing_classes = match block_hash_substrate(client, block_number) { - Some(block_hash_substrate) => fetch_missing_classes(state_update, overrides, block_hash_substrate), - None => aggregate_classes(state_update), - }; - - let arc_provider = Arc::new(provider.clone()); - let mut task_set = missing_classes.into_iter().fold(JoinSet::new(), |mut set, class_hash| { - let provider = Arc::clone(&arc_provider); - let state_update = Arc::clone(state_update); - let class_hash = *class_hash; - set.spawn(async move { download_class(class_hash, block_hash_madara(&state_update), &provider).await }); - set - }); - - // WARNING: all class downloads will abort if even a single class fails to download. - let mut classes = vec![]; - while let Some(res) = task_set.join_next().await { - classes.push(res.expect("Join error")?); - // No need to `abort_all()` the `task_set` in cast of errors, as dropping the `task_set` - // will abort all the tasks. - } - - Ok(classes) -} - -/// Retrieves Madara block hash from state update -fn block_hash_madara(state_update: &StateUpdate) -> FieldElement { - state_update.block_hash.unwrap() -} - -/// Retrieves Substrate block hash from rpc client -fn block_hash_substrate(client: &C, block_number: u64) -> Option -where - B: BlockT, - C: HeaderBackend, -{ - client - .hash(UniqueSaturatedInto::unique_saturated_into(block_number)) - .unwrap() - .map(|hash| H256::from_slice(hash.as_bits::().to_bitvec().as_raw_slice())) -} - -/// Downloads a class definition from the Starknet sequencer. 
Note that because -/// of the current type hell this needs to be converted into a blockifier equivalent -async fn download_class( - class_hash: FieldElement, - block_hash: FieldElement, - provider: &SequencerGatewayProvider, -) -> Result { - // log::info!("💾 Downloading class {class_hash:#x}"); - let core_class = provider.get_class(BlockIdCore::Hash(block_hash), class_hash).await?; - - // Core classes have to be converted into Blockifier classes to gain support - // for Substrate [`Encode`] and [`Decode`] traits - Ok(ContractClassData { - // TODO: find a less roundabout way of converting from a Felt252Wrapper - hash: ClassHash(Felt252Wrapper::from(class_hash).into()), - // TODO: remove this expect when ContractClassWrapper::try_from does proper error handling using - // thiserror - contract_class: ContractClassWrapper::try_from(core_class).expect("converting contract class"), - }) -} - -/// Filters out class declarations in the Starknet sequencer state update -/// and retains only those which are not stored in the local Substrate db. -fn fetch_missing_classes<'a>( - state_update: &'a StateUpdate, - overrides: &Arc, OpaqueExtrinsic>>>, - block_hash_substrate: H256, -) -> Vec<&'a FieldElement> { - aggregate_classes(state_update) - .into_iter() - .filter(|class_hash| is_missing_class(overrides, block_hash_substrate, Felt252Wrapper::from(**class_hash))) - .collect() -} - -/// Retrieves all class hashes from state update. This includes newly deployed -/// contract class hashes, Sierra class hashes and Cairo class hashes -fn aggregate_classes(state_update: &StateUpdate) -> Vec<&FieldElement> { - std::iter::empty() - .chain( - state_update - .state_diff - .deployed_contracts - .iter() - .map(|DeployedContract { address: _, class_hash }| class_hash), - ) - .chain( - state_update - .state_diff - .declared_classes - .iter() - .map(|DeclaredContract { class_hash, compiled_class_hash: _ }| class_hash), - ) - .unique() - .collect() -} - -/// Check if a class is stored in the local Substrate db. -/// -/// Since a change in class definition will result in a change in class hash, -/// this means we only need to check for class hashes in the db. -fn is_missing_class( - overrides: &Arc, OpaqueExtrinsic>>>, - block_hash_substrate: H256, - class_hash: Felt252Wrapper, -) -> bool { - overrides - .for_schema_version(&StarknetStorageSchemaVersion::Undefined) - .contract_class_by_class_hash(block_hash_substrate, ClassHash::from(class_hash)) - .is_none() -} - /// Notifies the consensus engine that a new block should be created. async fn create_block(cmds: &mut CommandSink, parent_hash: &mut Option) -> Result<(), String> { let (sender, receiver) = futures::channel::oneshot::channel(); @@ -513,23 +295,18 @@ where let hash_best = client.info().best_hash; let hash_current = block.parent_block_hash; - // Well howdy, seems like we can't convert a B::Hash to a FieldElement pa'tner, - // fancy this instead? 🤠🔫 let tmp = ::Hash::from_str(&hash_current.to_string()).unwrap_or(Default::default()); let number = block.block_number.ok_or("block number not found")? 
- 1; - // all blocks have been synchronized, can store pending data if hash_best == tmp { let state_update = provider .get_state_update(BlockId::Pending) .await .map_err(|e| format!("Failed to get pending state update: {e}"))?; - // Speaking about type conversion hell: 🔥 *STARKNET_PENDING_BLOCK.write().expect("Failed to acquire write lock on STARKNET_PENDING_BLOCK") = Some(crate::convert::block(block).await); - // This type conversion is evil and should not be necessary *STARKNET_PENDING_STATE_UPDATE.write().expect("Failed to aquire write lock on STARKNET_PENDING_STATE_UPDATE") = Some(crate::convert::state_update(state_update)); } @@ -541,16 +318,3 @@ where Ok(()) } -pub fn get_highest_block_hash_and_number() -> (FieldElement, u64) { - *STARKNET_HIGHEST_BLOCK_HASH_AND_NUMBER - .read() - .expect("Failed to acquire read lock on STARKNET_HIGHEST_BLOCK_HASH_AND_NUMBER") -} - -pub fn get_pending_block() -> Option { - STARKNET_PENDING_BLOCK.read().expect("Failed to acquire read lock on STARKNET_PENDING_BLOCK").clone() -} - -pub fn get_pending_state_update() -> Option { - STARKNET_PENDING_STATE_UPDATE.read().expect("Failed to acquire read lock on STARKNET_PENDING_BLOCK").clone() -} diff --git a/crates/client/sync/src/lib.rs b/crates/client/sync/src/lib.rs index 64f15e90a..17281b1dc 100644 --- a/crates/client/sync/src/lib.rs +++ b/crates/client/sync/src/lib.rs @@ -8,11 +8,12 @@ pub mod commitments; pub mod l1; pub mod l2; +pub mod fetch; pub mod reorgs; pub mod types; pub mod utils; -pub use l2::{FetchConfig, SenderConfig}; +pub use l2::SenderConfig; pub use utils::{convert, m, utility}; type CommandSink = futures::channel::mpsc::Sender>; @@ -24,6 +25,8 @@ pub mod starknet_sync_worker { use sp_blockchain::HeaderBackend; use sp_runtime::traits::Block as BlockT; + use self::fetch::fetch::FetchConfig; + use super::*; pub async fn sync( diff --git a/crates/client/sync/src/utils/utility.rs b/crates/client/sync/src/utils/utility.rs index 65cac4453..1f56aa31c 100644 --- a/crates/client/sync/src/utils/utility.rs +++ b/crates/client/sync/src/utils/utility.rs @@ -5,17 +5,24 @@ use std::sync::RwLock; use std::thread::sleep; use std::time::Duration; +use bitvec::order::Msb0; +use bitvec::view::AsBits; use ethers::types::{I256, U256}; use lazy_static::lazy_static; use rand::seq::SliceRandom; use rand::thread_rng; use reqwest::header; use serde_json::{json, Value}; +use sp_blockchain::HeaderBackend; +use sp_core::H256; +use sp_runtime::traits::{Block as BlockT, UniqueSaturatedInto}; use starknet_api::hash::StarkFelt; use starknet_ff::FieldElement; +use starknet_providers::sequencer::models::StateUpdate; +use crate::fetch::fetch::FetchConfig; use crate::l1::{L1StateUpdate, LogStateUpdate}; -use crate::l2::{FetchConfig, L2StateUpdate, STARKNET_HIGHEST_BLOCK_HASH_AND_NUMBER}; +use crate::l2::{L2StateUpdate, STARKNET_HIGHEST_BLOCK_HASH_AND_NUMBER}; // TODO: find a better place to store this lazy_static! 
{ @@ -206,3 +213,20 @@ pub fn get_highest_block_hash_and_number() -> (FieldElement, u64) { .read() .expect("Failed to acquire read lock on STARKNET_HIGHEST_BLOCK_HASH_AND_NUMBER") } + +/// Retrieves Deoxys block hash from state update +pub fn block_hash_deoxys(state_update: &StateUpdate) -> FieldElement { + state_update.block_hash.unwrap() +} + +/// Retrieves Substrate block hash from rpc client +pub fn block_hash_substrate(client: &C, block_number: u64) -> Option +where + B: BlockT, + C: HeaderBackend, +{ + client + .hash(UniqueSaturatedInto::unique_saturated_into(block_number)) + .unwrap() + .map(|hash| H256::from_slice(hash.as_bits::().to_bitvec().as_raw_slice())) +} \ No newline at end of file diff --git a/crates/node/src/commands/run.rs b/crates/node/src/commands/run.rs index 37b0ed705..e1287f200 100644 --- a/crates/node/src/commands/run.rs +++ b/crates/node/src/commands/run.rs @@ -2,7 +2,7 @@ use std::path::PathBuf; use std::result::Result as StdResult; use madara_runtime::SealingMode; -use mc_sync::l2::fetch_apply_genesis_block; +use mc_sync::fetch::fetch::{fetch_apply_genesis_block, FetchConfig}; use mc_sync::utility::update_config; use mc_sync::utils::constant::starknet_core_address; use reqwest::Url; @@ -111,7 +111,7 @@ impl NetworkType { } } - pub fn block_fetch_config(&self) -> mc_sync::FetchConfig { + pub fn block_fetch_config(&self) -> FetchConfig { let uri = self.uri(); let chain_id = self.chain_id(); @@ -119,7 +119,7 @@ impl NetworkType { let feeder_gateway = format!("{uri}/feeder_gateway").parse().unwrap(); let l1_core_address = self.l1_core_address(); - mc_sync::FetchConfig { + FetchConfig { gateway, feeder_gateway, chain_id, diff --git a/crates/node/src/service.rs b/crates/node/src/service.rs index 0b0b1750a..0e2b5bcba 100644 --- a/crates/node/src/service.rs +++ b/crates/node/src/service.rs @@ -15,6 +15,7 @@ use mc_genesis_data_provider::OnDiskGenesisConfig; use mc_mapping_sync::MappingSyncWorker; use mc_storage::overrides_handle; use mc_sync::starknet_sync_worker; +use mc_sync::fetch::fetch::FetchConfig; use mp_block::state_update::StateUpdateWrapper; use mp_contract::class::ClassUpdateWrapper; use mp_sequencer_address::{ @@ -275,7 +276,7 @@ pub fn new_full( rpc_port: u16, l1_url: Url, cache_more_things: bool, - fetch_config: mc_sync::FetchConfig, + fetch_config: FetchConfig, genesis_block: mp_block::Block, ) -> Result { let build_import_queue = From 9dcf99667103b5a0965c01b39d63df81309980ee Mon Sep 17 00:00:00 2001 From: antiyro Date: Fri, 22 Mar 2024 12:14:11 +0100 Subject: [PATCH 5/6] feat: :fire: fix(sync): Cleaned mc-sync isolating fetch process + added shared SyncStatus --- CHANGELOG.md | 1 + .../rpc_methods/get_transaction_receipt.rs | 2 +- crates/client/rpc/src/utils.rs | 2 +- crates/client/sync/src/fetch/fetch.rs | 6 ++-- crates/client/sync/src/fetch/mod.rs | 2 +- crates/client/sync/src/l1.rs | 9 ++--- crates/client/sync/src/l2.rs | 34 ++++++++++++++++--- crates/client/sync/src/lib.rs | 3 +- crates/client/sync/src/utils/utility.rs | 2 +- crates/node/src/commands/run.rs | 10 +----- crates/node/src/service.rs | 8 ++--- 11 files changed, 49 insertions(+), 30 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 27a5efb2d..b032dfaa9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,7 @@ git # Deoxys Changelog ## Next release +- fix(sync): Cleaned mc-sync isolating fetch process + added shared SyncStatus - feat(self-hosted): host our own runner - fix(deps): Removed unused dependencies - feat(multi-trie): Added support for persistent storage tries diff 
--git a/crates/client/rpc/src/rpc_methods/get_transaction_receipt.rs b/crates/client/rpc/src/rpc_methods/get_transaction_receipt.rs index dd5033c23..9ac2966f0 100644 --- a/crates/client/rpc/src/rpc_methods/get_transaction_receipt.rs +++ b/crates/client/rpc/src/rpc_methods/get_transaction_receipt.rs @@ -87,7 +87,7 @@ where let actual_fee = execution_infos.actual_fee.0.into(); - let finality_status = if block_number <= mc_sync::l1::ETHEREUM_STATE_UPDATE.lock().unwrap().block_number { + let finality_status = if block_number <= mc_sync::l1::ETHEREUM_STATE_UPDATE.read().unwrap().block_number { TransactionFinalityStatus::AcceptedOnL1 } else { TransactionFinalityStatus::AcceptedOnL2 diff --git a/crates/client/rpc/src/utils.rs b/crates/client/rpc/src/utils.rs index 791ae6ba2..142eecd88 100644 --- a/crates/client/rpc/src/utils.rs +++ b/crates/client/rpc/src/utils.rs @@ -135,7 +135,7 @@ pub(crate) fn tx_conv(txs: &[mp_transactions::Transaction], tx_hashes: Vec BlockStatus { - if block_number <= ETHEREUM_STATE_UPDATE.lock().unwrap().block_number { + if block_number <= ETHEREUM_STATE_UPDATE.read().unwrap().block_number { BlockStatus::AcceptedOnL1 } else { BlockStatus::AcceptedOnL2 diff --git a/crates/client/sync/src/fetch/fetch.rs b/crates/client/sync/src/fetch/fetch.rs index a9cd19c0a..2a490576e 100644 --- a/crates/client/sync/src/fetch/fetch.rs +++ b/crates/client/sync/src/fetch/fetch.rs @@ -1,6 +1,7 @@ //! Contains the code required to fetch data from the network efficiently. use std::sync::Arc; +use itertools::Itertools; use mc_storage::OverrideHandle; use mp_contract::class::{ContractClassData, ContractClassWrapper}; use mp_felt::Felt252Wrapper; @@ -13,13 +14,12 @@ use sp_runtime::OpaqueExtrinsic; use starknet_api::api_core::ClassHash; use starknet_core::types::BlockId as BlockIdCore; use starknet_ff::FieldElement; -use starknet_providers::sequencer::models::state_update::{DeclaredContract, DeployedContract}; use starknet_providers::sequencer::models as p; +use starknet_providers::sequencer::models::state_update::{DeclaredContract, DeployedContract}; use starknet_providers::sequencer::models::{BlockId, StateUpdate}; use starknet_providers::{Provider, ProviderError, SequencerGatewayProvider}; use tokio::task::JoinSet; use tokio::time::Duration; -use itertools::Itertools; use url::Url; use crate::l2::L2SyncError; @@ -235,4 +235,4 @@ fn is_missing_class( .for_schema_version(&StarknetStorageSchemaVersion::Undefined) .contract_class_by_class_hash(block_hash_substrate, ClassHash::from(class_hash)) .is_none() -} \ No newline at end of file +} diff --git a/crates/client/sync/src/fetch/mod.rs b/crates/client/sync/src/fetch/mod.rs index c798af5b0..0e40308b0 100644 --- a/crates/client/sync/src/fetch/mod.rs +++ b/crates/client/sync/src/fetch/mod.rs @@ -1 +1 @@ -pub mod fetch; \ No newline at end of file +pub mod fetch; diff --git a/crates/client/sync/src/l1.rs b/crates/client/sync/src/l1.rs index 90d795e36..9d0418ee0 100644 --- a/crates/client/sync/src/l1.rs +++ b/crates/client/sync/src/l1.rs @@ -1,6 +1,6 @@ //! Contains the necessaries to perform an L1 verification of the state -use std::sync::{Arc, Mutex}; +use std::sync::{Arc, RwLock}; use anyhow::Result; use ethers::contract::{abigen, EthEvent}; @@ -23,7 +23,7 @@ use crate::utils::constant::LOG_STATE_UPDTATE_TOPIC; lazy_static! 
{
     /// Shared latest L2 state update verified on L1
-    pub static ref ETHEREUM_STATE_UPDATE: Arc<Mutex<L1StateUpdate>> = Arc::new(Mutex::new(L1StateUpdate {
+    pub static ref ETHEREUM_STATE_UPDATE: Arc<RwLock<L1StateUpdate>> = Arc::new(RwLock::new(L1StateUpdate {
         block_number: u64::default(),
         global_root: StarkHash::default(),
         block_hash: StarkHash::default(),
@@ -192,14 +192,15 @@ pub fn update_l1(state_update: L1StateUpdate) {
     {
         let last_state_update = ETHEREUM_STATE_UPDATE.clone();
-        let mut new_state_update = last_state_update.lock().unwrap();
+        let mut new_state_update =
+            last_state_update.write().expect("Failed to acquire write lock on ETHEREUM_STATE_UPDATE");
         *new_state_update = state_update.clone();
     }
 }
 
 /// Verify the L1 state with the latest data
 pub async fn verify_l1(state_update: L1StateUpdate, rpc_port: u16) -> Result<(), String> {
-    let starknet_state_block_number = STARKNET_STATE_UPDATE.lock().map_err(|e| e.to_string())?.block_number;
+    let starknet_state_block_number = STARKNET_STATE_UPDATE.read().map_err(|e| e.to_string())?.block_number;
 
     // Check if the node reached the latest verified state on Ethereum
     if state_update.block_number > starknet_state_block_number {
diff --git a/crates/client/sync/src/l2.rs b/crates/client/sync/src/l2.rs
index 6f1a2e6ab..6ecda8e15 100644
--- a/crates/client/sync/src/l2.rs
+++ b/crates/client/sync/src/l2.rs
@@ -31,6 +31,7 @@ use tokio::time::{Duration, Instant};
 
 use crate::commitments::lib::{build_commitment_state_diff, update_state_root};
 use crate::fetch::fetch::{fetch_block_and_updates, FetchConfig};
+use crate::l1::ETHEREUM_STATE_UPDATE;
 use crate::utility::block_hash_substrate;
 use crate::CommandSink;
 
@@ -51,9 +52,27 @@ pub struct L2StateUpdate {
     pub block_hash: StarkHash,
 }
 
+/// The current syncing status:
+///
+/// - SyncVerifiedState: the node is syncing AcceptedOnL1 blocks
+/// - SyncUnverifiedState: the node is syncing AcceptedOnL2 blocks
+/// - SyncPendingState: the node is fully synced and now syncing Pending blocks
+///
+/// This is used to determine the current state of the syncing process
+pub enum SyncStatus {
+    SyncVerifiedState,
+    SyncUnverifiedState,
+    SyncPendingState,
+}
+
+lazy_static! {
+    /// Shared current syncing status, either verified, unverified or pending
+    pub static ref SYNC_STATUS: RwLock<SyncStatus> = RwLock::new(SyncStatus::SyncVerifiedState);
+}
+
 lazy_static!
{ /// Shared latest L2 state update verified on L2 - pub static ref STARKNET_STATE_UPDATE: Mutex = Mutex::new(L2StateUpdate { + pub static ref STARKNET_STATE_UPDATE: RwLock = RwLock::new(L2StateUpdate { block_number: u64::default(), global_root: StarkHash::default(), block_hash: StarkHash::default(), @@ -247,8 +266,16 @@ async fn create_block(cmds: &mut CommandSink, parent_hash: &mut Option) -> /// Update the L2 state with the latest data pub fn update_l2(state_update: L2StateUpdate) { - let mut last_state_update = STARKNET_STATE_UPDATE.lock().expect("Failed to acquire lock on STARKNET_STATE_UPDATE"); - *last_state_update = state_update.clone(); + let mut last_l2_state_update = + STARKNET_STATE_UPDATE.write().expect("Failed to acquire write lock on STARKNET_STATE_UPDATE"); + *last_l2_state_update = state_update.clone(); + + let last_l1_state_update = + ETHEREUM_STATE_UPDATE.read().expect("Failed to acquire read lock on ETHEREUM_STATE_UPDATE"); + if state_update.block_number >= last_l1_state_update.block_number { + let mut sync_status = SYNC_STATUS.write().expect("Failed to acquire write lock on SYNC_STATUS"); + *sync_status = SyncStatus::SyncUnverifiedState; + } } /// Verify and update the L2 state according to the latest state update @@ -317,4 +344,3 @@ where Ok(()) } - diff --git a/crates/client/sync/src/lib.rs b/crates/client/sync/src/lib.rs index 17281b1dc..5ecf0755d 100644 --- a/crates/client/sync/src/lib.rs +++ b/crates/client/sync/src/lib.rs @@ -6,9 +6,9 @@ // use reqwest::Url; pub mod commitments; +pub mod fetch; pub mod l1; pub mod l2; -pub mod fetch; pub mod reorgs; pub mod types; pub mod utils; @@ -26,7 +26,6 @@ pub mod starknet_sync_worker { use sp_runtime::traits::Block as BlockT; use self::fetch::fetch::FetchConfig; - use super::*; pub async fn sync( diff --git a/crates/client/sync/src/utils/utility.rs b/crates/client/sync/src/utils/utility.rs index 1f56aa31c..52f3e289f 100644 --- a/crates/client/sync/src/utils/utility.rs +++ b/crates/client/sync/src/utils/utility.rs @@ -229,4 +229,4 @@ where .hash(UniqueSaturatedInto::unique_saturated_into(block_number)) .unwrap() .map(|hash| H256::from_slice(hash.as_bits::().to_bitvec().as_raw_slice())) -} \ No newline at end of file +} diff --git a/crates/node/src/commands/run.rs b/crates/node/src/commands/run.rs index e1287f200..ddba8a315 100644 --- a/crates/node/src/commands/run.rs +++ b/crates/node/src/commands/run.rs @@ -119,15 +119,7 @@ impl NetworkType { let feeder_gateway = format!("{uri}/feeder_gateway").parse().unwrap(); let l1_core_address = self.l1_core_address(); - FetchConfig { - gateway, - feeder_gateway, - chain_id, - workers: 5, - sound: false, - l1_core_address, - verify: true, - } + FetchConfig { gateway, feeder_gateway, chain_id, workers: 5, sound: false, l1_core_address, verify: true } } } diff --git a/crates/node/src/service.rs b/crates/node/src/service.rs index 0e2b5bcba..a906ad997 100644 --- a/crates/node/src/service.rs +++ b/crates/node/src/service.rs @@ -14,8 +14,8 @@ use madara_runtime::{self, Hash, RuntimeApi, SealingMode, StarknetHasher}; use mc_genesis_data_provider::OnDiskGenesisConfig; use mc_mapping_sync::MappingSyncWorker; use mc_storage::overrides_handle; -use mc_sync::starknet_sync_worker; use mc_sync::fetch::fetch::FetchConfig; +use mc_sync::starknet_sync_worker; use mp_block::state_update::StateUpdateWrapper; use mp_contract::class::ClassUpdateWrapper; use mp_sequencer_address::{ @@ -440,7 +440,7 @@ pub fn new_full( Some("madara"), starknet_sync_worker::sync(fetch_config, sender_config, rpc_port, 
l1_url, madara_backend, Arc::clone(&client)), ); - + // manual-seal authorship if !sealing.is_default() { run_manual_seal_authorship( @@ -462,7 +462,7 @@ pub fn new_full( return Ok(task_manager); } - + if role.is_authority() { let proposer_factory = ProposerFactory::new( task_manager.spawn_handle(), @@ -637,7 +637,7 @@ where /// The receiver that we're using to receive blocks. block_receiver: tokio::sync::Mutex>, - /// The receiver that we're using to receive commitment state diffs. + /// The receiver that we're using to receive state updates. state_update_receiver: tokio::sync::Mutex>, /// The receiver that we're using to receive class updates. From 02b45b6b813f7491af7e2a0cf1ba14860e790080 Mon Sep 17 00:00:00 2001 From: antiyro Date: Fri, 22 Mar 2024 12:26:51 +0100 Subject: [PATCH 6/6] patched clippy --- crates/client/sync/src/fetch/{fetch.rs => fetchers.rs} | 0 crates/client/sync/src/fetch/mod.rs | 2 +- crates/client/sync/src/l2.rs | 2 +- crates/client/sync/src/lib.rs | 2 +- crates/client/sync/src/utils/utility.rs | 2 +- crates/node/src/commands/run.rs | 2 +- crates/node/src/service.rs | 2 +- 7 files changed, 6 insertions(+), 6 deletions(-) rename crates/client/sync/src/fetch/{fetch.rs => fetchers.rs} (100%) diff --git a/crates/client/sync/src/fetch/fetch.rs b/crates/client/sync/src/fetch/fetchers.rs similarity index 100% rename from crates/client/sync/src/fetch/fetch.rs rename to crates/client/sync/src/fetch/fetchers.rs diff --git a/crates/client/sync/src/fetch/mod.rs b/crates/client/sync/src/fetch/mod.rs index 0e40308b0..5ff58298d 100644 --- a/crates/client/sync/src/fetch/mod.rs +++ b/crates/client/sync/src/fetch/mod.rs @@ -1 +1 @@ -pub mod fetch; +pub mod fetchers; diff --git a/crates/client/sync/src/l2.rs b/crates/client/sync/src/l2.rs index 6ecda8e15..dc4925ce5 100644 --- a/crates/client/sync/src/l2.rs +++ b/crates/client/sync/src/l2.rs @@ -30,7 +30,7 @@ use tokio::sync::mpsc::Sender; use tokio::time::{Duration, Instant}; use crate::commitments::lib::{build_commitment_state_diff, update_state_root}; -use crate::fetch::fetch::{fetch_block_and_updates, FetchConfig}; +use crate::fetch::fetchers::{fetch_block_and_updates, FetchConfig}; use crate::l1::ETHEREUM_STATE_UPDATE; use crate::utility::block_hash_substrate; use crate::CommandSink; diff --git a/crates/client/sync/src/lib.rs b/crates/client/sync/src/lib.rs index 5ecf0755d..2f3c85949 100644 --- a/crates/client/sync/src/lib.rs +++ b/crates/client/sync/src/lib.rs @@ -25,7 +25,7 @@ pub mod starknet_sync_worker { use sp_blockchain::HeaderBackend; use sp_runtime::traits::Block as BlockT; - use self::fetch::fetch::FetchConfig; + use self::fetch::fetchers::FetchConfig; use super::*; pub async fn sync( diff --git a/crates/client/sync/src/utils/utility.rs b/crates/client/sync/src/utils/utility.rs index 52f3e289f..dd882de2b 100644 --- a/crates/client/sync/src/utils/utility.rs +++ b/crates/client/sync/src/utils/utility.rs @@ -20,7 +20,7 @@ use starknet_api::hash::StarkFelt; use starknet_ff::FieldElement; use starknet_providers::sequencer::models::StateUpdate; -use crate::fetch::fetch::FetchConfig; +use crate::fetch::fetchers::FetchConfig; use crate::l1::{L1StateUpdate, LogStateUpdate}; use crate::l2::{L2StateUpdate, STARKNET_HIGHEST_BLOCK_HASH_AND_NUMBER}; diff --git a/crates/node/src/commands/run.rs b/crates/node/src/commands/run.rs index ddba8a315..49ed22cc1 100644 --- a/crates/node/src/commands/run.rs +++ b/crates/node/src/commands/run.rs @@ -2,7 +2,7 @@ use std::path::PathBuf; use std::result::Result as StdResult; use 
madara_runtime::SealingMode; -use mc_sync::fetch::fetch::{fetch_apply_genesis_block, FetchConfig}; +use mc_sync::fetch::fetchers::{fetch_apply_genesis_block, FetchConfig}; use mc_sync::utility::update_config; use mc_sync::utils::constant::starknet_core_address; use reqwest::Url; diff --git a/crates/node/src/service.rs b/crates/node/src/service.rs index a906ad997..3b461358a 100644 --- a/crates/node/src/service.rs +++ b/crates/node/src/service.rs @@ -14,7 +14,7 @@ use madara_runtime::{self, Hash, RuntimeApi, SealingMode, StarknetHasher}; use mc_genesis_data_provider::OnDiskGenesisConfig; use mc_mapping_sync::MappingSyncWorker; use mc_storage::overrides_handle; -use mc_sync::fetch::fetch::FetchConfig; +use mc_sync::fetch::fetchers::FetchConfig; use mc_sync::starknet_sync_worker; use mp_block::state_update::StateUpdateWrapper; use mp_contract::class::ClassUpdateWrapper;