diff --git a/Cargo.lock b/Cargo.lock index a8ecbd7636d..5bbd7217f4d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3147,15 +3147,6 @@ dependencies = [ "either", ] -[[package]] -name = "itertools" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1c173a5686ce8bfa551b3563d0c2170bf24ca44da99c7ca4bfdab5418c3fe57" -dependencies = [ - "either", -] - [[package]] name = "itertools" version = "0.12.0" @@ -4717,7 +4708,7 @@ checksum = "8bdf592881d821b83d471f8af290226c8d51402259e9bb5be7f9f8bdebbb11ac" dependencies = [ "bytes", "heck 0.4.1", - "itertools 0.11.0", + "itertools 0.10.5", "log", "multimap", "once_cell", @@ -4738,7 +4729,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "265baba7fabd416cf5078179f7d2cbeca4ce7a9041111900675ea7c4cb8a4c32" dependencies = [ "anyhow", - "itertools 0.11.0", + "itertools 0.10.5", "proc-macro2 1.0.86", "quote 1.0.36", "syn 2.0.72", @@ -8156,9 +8147,9 @@ dependencies = [ [[package]] name = "zksync_concurrency" -version = "0.1.0-rc.10" +version = "0.1.0-rc.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a463106f37cfa589896e6a165b5bb0533013377990e19f10e8c4894346a62e8b" +checksum = "b0e31a9fc9a390b440cd12bbe040330dc64f64697a8a8ecbc3beb98cd0747909" dependencies = [ "anyhow", "once_cell", @@ -8192,9 +8183,9 @@ dependencies = [ [[package]] name = "zksync_consensus_bft" -version = "0.1.0-rc.10" +version = "0.1.0-rc.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d1bed5bd7e219cc1429ae36732f6d943e4d98a1b4ddcbb60cff89a3a4d3bcd6" +checksum = "e22e3bfe96fa30a57313e774a5e8c74ffee884abff57ecacc10e8832315ee8a2" dependencies = [ "anyhow", "async-trait", @@ -8214,9 +8205,9 @@ dependencies = [ [[package]] name = "zksync_consensus_crypto" -version = "0.1.0-rc.10" +version = "0.1.0-rc.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f0883af373e9198fd27c0148e7e47b37f912cb4b444bec3f7eed0af0b0dfc69" +checksum = "efb7ff3ec44b7b92fd4e28d9d92b83d61dc74125ccfc90bcfb27a5750d8a8580" dependencies = [ "anyhow", "blst", @@ -8238,9 +8229,9 @@ dependencies = [ [[package]] name = "zksync_consensus_executor" -version = "0.1.0-rc.10" +version = "0.1.0-rc.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d70afdfc07658d6bb309237c5da2cab40ab7efed95538c92fd0340b1b967818c" +checksum = "a7fcde1275970a6b8a33ea2ade5cc994d6392f95509ce374e0e7a26cde4cd6db" dependencies = [ "anyhow", "async-trait", @@ -8259,9 +8250,9 @@ dependencies = [ [[package]] name = "zksync_consensus_network" -version = "0.1.0-rc.10" +version = "0.1.0-rc.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e82f6f2dbd122b60a199843bd70b9b979190e81458fe17180e23f930ea2194e1" +checksum = "e6ee48bee7dae8adb2769c7315adde1780832d05ecb6a77c08cdda53a315992a" dependencies = [ "anyhow", "async-trait", @@ -8294,9 +8285,9 @@ dependencies = [ [[package]] name = "zksync_consensus_roles" -version = "0.1.0-rc.10" +version = "0.1.0-rc.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e426aa7c68a12dde702c3ec4ef49de24d9054ef908384232b7887e043ca3f2fe" +checksum = "72223c0b20621775db51bcc4b043addafeaf784d444af2ad4bc8bcdee477367c" dependencies = [ "anyhow", "bit-vec", @@ -8316,9 +8307,9 @@ dependencies = [ [[package]] name = "zksync_consensus_storage" -version = "0.1.0-rc.10" +version = "0.1.0-rc.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8388c33fd5bc3725e58c26db2d3016538c6221c6448b3e92cf5df07f6074a028" +checksum = "41d1750ad93f7e3a0c2f5880f9bcc1244a3b46d3e6c124c4f65f545032b87464" dependencies = [ "anyhow", "async-trait", @@ -8336,9 +8327,9 @@ dependencies = [ [[package]] name = "zksync_consensus_utils" -version = "0.1.0-rc.10" +version = "0.1.0-rc.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "612920e56dcb99f227bc23e1254f4dabc7cb4c5cd1a9ec400ceba0ec6fa77c1e" +checksum = "2ff679f8b5f671d887a750b8107f3b5c01fd6085f68eef37ab01de8d2bd0736b" dependencies = [ "anyhow", "rand 0.8.5", @@ -9306,9 +9297,9 @@ dependencies = [ [[package]] name = "zksync_protobuf" -version = "0.1.0-rc.10" +version = "0.1.0-rc.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0d82fd63f27681b9c01f0e01e3060e71b72809db8e21d9130663ee92bd1e391" +checksum = "f4f6ba3bf0aac20de18b4ae18a22d8c81b83f8f72e8fdec1c879525ecdacd2f5" dependencies = [ "anyhow", "bit-vec", @@ -9327,9 +9318,9 @@ dependencies = [ [[package]] name = "zksync_protobuf_build" -version = "0.1.0-rc.10" +version = "0.1.0-rc.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee3c158ab4d211053886371d4a00514bdf8ebdf826d40ee03b98fee2e0d1605e" +checksum = "7798c248b9a64505f0586bd5fadad6b26c999be4a8dec6b1a86b10b3888169c5" dependencies = [ "anyhow", "heck 0.5.0", diff --git a/Cargo.toml b/Cargo.toml index f2c62efb453..d4855a34b9d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -221,16 +221,16 @@ zk_evm_1_5_0 = { package = "zk_evm", version = "=0.150.4" } vm2 = { git = "https://github.com/matter-labs/vm2.git", rev = "9a38900d7af9b1d72b47ce3be980e77c1239a61d" } # Consensus dependencies. -zksync_concurrency = "=0.1.0-rc.10" -zksync_consensus_bft = "=0.1.0-rc.10" -zksync_consensus_crypto = "=0.1.0-rc.10" -zksync_consensus_executor = "=0.1.0-rc.10" -zksync_consensus_network = "=0.1.0-rc.10" -zksync_consensus_roles = "=0.1.0-rc.10" -zksync_consensus_storage = "=0.1.0-rc.10" -zksync_consensus_utils = "=0.1.0-rc.10" -zksync_protobuf = "=0.1.0-rc.10" -zksync_protobuf_build = "=0.1.0-rc.10" +zksync_concurrency = "=0.1.0-rc.11" +zksync_consensus_bft = "=0.1.0-rc.11" +zksync_consensus_crypto = "=0.1.0-rc.11" +zksync_consensus_executor = "=0.1.0-rc.11" +zksync_consensus_network = "=0.1.0-rc.11" +zksync_consensus_roles = "=0.1.0-rc.11" +zksync_consensus_storage = "=0.1.0-rc.11" +zksync_consensus_utils = "=0.1.0-rc.11" +zksync_protobuf = "=0.1.0-rc.11" +zksync_protobuf_build = "=0.1.0-rc.11" # "Local" dependencies zksync_multivm = { version = "0.1.0", path = "core/lib/multivm" } diff --git a/core/lib/dal/src/consensus_dal.rs b/core/lib/dal/src/consensus_dal.rs index d8f28705421..8f05cb38177 100644 --- a/core/lib/dal/src/consensus_dal.rs +++ b/core/lib/dal/src/consensus_dal.rs @@ -454,7 +454,7 @@ impl ConsensusDal<'_, '_> { /// Gets a number of the last L1 batch that was inserted. It might have gaps before it, /// depending on the order in which votes have been collected over gossip by consensus. - pub async fn get_last_batch_certificate_number( + pub async fn last_batch_certificate_number( &mut self, ) -> anyhow::Result> { let row = sqlx::query!( @@ -465,7 +465,7 @@ impl ConsensusDal<'_, '_> { l1_batches_consensus "# ) - .instrument("get_last_batch_certificate_number") + .instrument("last_batch_certificate_number") .report_latency() .fetch_one(self.storage) .await?; @@ -480,7 +480,7 @@ impl ConsensusDal<'_, '_> { /// Number of L1 batch that the L2 block belongs to. /// None if the L2 block doesn't exist. 
- async fn batch_of_block( + pub async fn batch_of_block( &mut self, block: validator::BlockNumber, ) -> anyhow::Result<Option<attester::BatchNumber>> { @@ -535,9 +535,9 @@ impl ConsensusDal<'_, '_> { let Some(next_batch_to_attest) = async { // First batch that we don't have a certificate for. if let Some(last) = self - .get_last_batch_certificate_number() + .last_batch_certificate_number() .await - .context("get_last_batch_certificate_number()")? + .context("last_batch_certificate_number()")? { return Ok(Some(last + 1)); } @@ -669,7 +669,7 @@ mod tests { // Retrieve the latest certificate. let number = conn .consensus_dal() - .get_last_batch_certificate_number() + .last_batch_certificate_number() .await .unwrap() .unwrap(); diff --git a/core/node/consensus/src/en.rs b/core/node/consensus/src/en.rs index ce8a555e06d..259cac5d074 100644 --- a/core/node/consensus/src/en.rs +++ b/core/node/consensus/src/en.rs @@ -1,10 +1,8 @@ +use std::sync::Arc; + use anyhow::Context as _; -use async_trait::async_trait; use zksync_concurrency::{ctx, error::Wrap as _, scope, time}; -use zksync_consensus_executor::{ - self as executor, - attestation::{AttestationStatusClient, AttestationStatusRunner}, -}; +use zksync_consensus_executor::{self as executor, attestation}; use zksync_consensus_roles::{attester, validator}; use zksync_consensus_storage::{BatchStore, BlockStore}; use zksync_dal::consensus_dal; @@ -38,9 +36,7 @@ impl EN { cfg: ConsensusConfig, secrets: ConsensusSecrets, ) -> anyhow::Result<()> { - let attester = config::attester_key(&secrets) - .context("attester_key")? - .map(|key| executor::Attester { key }); + let attester = config::attester_key(&secrets).context("attester_key")?; tracing::debug!( is_attester = attester.is_some(), @@ -53,7 +49,6 @@ impl EN { // Initialize genesis. let genesis = self.fetch_genesis(ctx).await.wrap("fetch_genesis()")?; - let genesis_hash = genesis.hash(); let mut conn = self.pool.connection(ctx).await.wrap("connection()")?; conn.try_update_genesis(ctx, &genesis) @@ -74,18 +69,21 @@ impl EN { // Monitor the genesis of the main node. // If it changes, it means that a hard fork occurred and we need to reset the consensus state. - s.spawn_bg::<()>(async { - let old = genesis; - loop { - if let Ok(new) = self.fetch_genesis(ctx).await { - if new != old { - return Err(anyhow::format_err!( - "genesis changed: old {old:?}, new {new:?}" - ) - .into()); + s.spawn_bg::<()>({ + let old = genesis.clone(); + async { + let old = old; + loop { + if let Ok(new) = self.fetch_genesis(ctx).await { + if new != old { + return Err(anyhow::format_err!( + "genesis changed: old {old:?}, new {new:?}" + ) + .into()); + } } + ctx.sleep(time::Duration::seconds(5)).await?; } - ctx.sleep(time::Duration::seconds(5)).await?; } }); @@ -106,17 +104,8 @@ impl EN { .wrap("BatchStore::new()")?; s.spawn_bg(async { Ok(runner.run(ctx).await?) }); - let (attestation_status, runner) = { - AttestationStatusRunner::init( - ctx, - Box::new(MainNodeAttestationStatus(self.client.clone())), - time::Duration::seconds(5), - genesis_hash, - ) - .await - .wrap("AttestationStatusRunner::init()")? - }; - s.spawn_bg(async { Ok(runner.run(ctx).await?)
}); + let attestation = Arc::new(attestation::Controller::new(attester)); + s.spawn_bg(self.run_attestation_updater(ctx, genesis.clone(), attestation.clone())); let executor = executor::Executor { config: config::executor(&cfg, &secrets)?, @@ -129,8 +118,7 @@ replica_store: Box::new(store.clone()), payload_manager: Box::new(store.clone()), }), - attester, - attestation_status, + attestation, }; tracing::info!("running the external node executor"); executor.run(ctx).await?; @@ -174,6 +162,62 @@ impl EN { } } + /// Monitors the `AttestationStatus` on the main node, + /// and updates the attestation config accordingly. + async fn run_attestation_updater( + &self, + ctx: &ctx::Ctx, + genesis: validator::Genesis, + attestation: Arc<attestation::Controller>, + ) -> ctx::Result<()> { + const POLL_INTERVAL: time::Duration = time::Duration::seconds(5); + let Some(committee) = &genesis.attesters else { + return Ok(()); + }; + let committee = Arc::new(committee.clone()); + let mut next = attester::BatchNumber(0); + loop { + let status = loop { + match self.fetch_attestation_status(ctx).await { + Err(err) => tracing::warn!("{err:#}"), + Ok(status) => { + if status.genesis != genesis.hash() { + return Err(anyhow::format_err!("genesis mismatch").into()); + } + if status.next_batch_to_attest >= next { + break status; + } + } + } + ctx.sleep(POLL_INTERVAL).await?; + }; + tracing::info!( + "waiting for hash of batch {:?}", + status.next_batch_to_attest + ); + let hash = self + .pool + .wait_for_batch_hash(ctx, status.next_batch_to_attest) + .await?; + tracing::info!( + "attesting batch {:?} with hash {hash:?}", + status.next_batch_to_attest + ); + attestation + .start_attestation(Arc::new(attestation::Info { + batch_to_attest: attester::Batch { + genesis: status.genesis, + hash, + number: status.next_batch_to_attest, + }, + committee: committee.clone(), + })) + .await + .context("start_attestation()")?; + next = status.next_batch_to_attest.next(); + } + } + /// Periodically fetches the head of the main node /// and updates `SyncState` accordingly. async fn fetch_state_loop(&self, ctx: &ctx::Ctx) -> ctx::Result<()> { @@ -213,6 +257,22 @@ impl EN { .with_hash()) } + #[tracing::instrument(skip_all)] + async fn fetch_attestation_status( + &self, + ctx: &ctx::Ctx, + ) -> ctx::Result<consensus_dal::AttestationStatus> { + match ctx.wait(self.client.fetch_attestation_status()).await? { + Ok(Some(status)) => Ok(zksync_protobuf::serde::deserialize(&status.0) + .context("deserialize(AttestationStatus)")?), + Ok(None) => Err(anyhow::format_err!("empty response").into()), + Err(err) => Err(anyhow::format_err!( + "AttestationStatus call to main node HTTP RPC failed: {err:#}" + ) + .into()), + } + } + /// Fetches (with retries) the given block from the main node. async fn fetch_block(&self, ctx: &ctx::Ctx, n: L2BlockNumber) -> ctx::Result<FetchedBlock> { const RETRY_INTERVAL: time::Duration = time::Duration::seconds(5); @@ -269,31 +329,3 @@ impl EN { Ok(()) } } - -/// Wrapper to call [MainNodeClient::fetch_attestation_status] and adapt the return value to [AttestationStatusClient]. -struct MainNodeAttestationStatus(Box<DynClient<L2>>); - -#[async_trait] -impl AttestationStatusClient for MainNodeAttestationStatus { - async fn attestation_status( - &self, - ctx: &ctx::Ctx, - ) -> ctx::Result<Option<(attester::GenesisHash, attester::BatchNumber)>> { - match ctx.wait(self.0.fetch_attestation_status()).await? { - Ok(Some(status)) => { - // If this fails the AttestationStatusRunner will log it an retry it later, - // but it won't stop the whole node.
- let status: consensus_dal::AttestationStatus = - zksync_protobuf::serde::deserialize(&status.0) - .context("deserialize(AttestationStatus")?; - - Ok(Some((status.genesis, status.next_batch_to_attest))) - } - Ok(None) => Ok(None), - Err(err) => { - tracing::warn!("AttestationStatus call to main node HTTP RPC failed: {err}"); - Ok(None) - } - } - } -} diff --git a/core/node/consensus/src/mn.rs b/core/node/consensus/src/mn.rs index b5e76afd63e..7de86b4d8ba 100644 --- a/core/node/consensus/src/mn.rs +++ b/core/node/consensus/src/mn.rs @@ -1,13 +1,15 @@ +use std::sync::Arc; + use anyhow::Context as _; use zksync_concurrency::{ctx, error::Wrap as _, scope, time}; use zksync_config::configs::consensus::{ConsensusConfig, ConsensusSecrets}; -use zksync_consensus_executor::{self as executor, attestation::AttestationStatusRunner, Attester}; -use zksync_consensus_roles::validator; +use zksync_consensus_executor::{self as executor, attestation}; +use zksync_consensus_roles::{attester, validator}; use zksync_consensus_storage::{BatchStore, BlockStore}; use crate::{ config, - storage::{ConnectionPool, Store}, + storage::{ConnectionPool, InsertCertificateError, Store}, }; /// Task running a consensus validator for the main node. @@ -23,9 +25,7 @@ pub async fn run_main_node( .context("validator_key")? .context("missing validator_key")?; - let attester = config::attester_key(&secrets) - .context("attester_key")? - .map(|key| Attester { key }); + let attester = config::attester_key(&secrets).context("attester_key")?; tracing::debug!(is_attester = attester.is_some(), "main node attester mode"); @@ -42,7 +42,9 @@ pub async fn run_main_node( } // The main node doesn't have a payload queue as it produces all the L2 blocks itself. - let (store, runner) = Store::new(ctx, pool, None).await.wrap("Store::new()")?; + let (store, runner) = Store::new(ctx, pool.clone(), None) + .await + .wrap("Store::new()")?; s.spawn_bg(runner.run(ctx)); let (block_store, runner) = BlockStore::new(ctx, Box::new(store.clone())) @@ -50,8 +52,9 @@ pub async fn run_main_node( .wrap("BlockStore::new()")?; s.spawn_bg(runner.run(ctx)); + let genesis = block_store.genesis().clone(); anyhow::ensure!( - block_store.genesis().leader_selection + genesis.leader_selection == validator::LeaderSelectionMode::Sticky(validator_key.public()), "unsupported leader selection mode - main node has to be the leader" ); @@ -61,17 +64,13 @@ pub async fn run_main_node( .wrap("BatchStore::new()")?; s.spawn_bg(runner.run(ctx)); - let (attestation_status, runner) = { - AttestationStatusRunner::init_from_store( - ctx, - batch_store.clone(), - time::Duration::seconds(1), - block_store.genesis().hash(), - ) - .await - .wrap("AttestationStatusRunner::init_from_store()")? - }; - s.spawn_bg(runner.run(ctx)); + let attestation = Arc::new(attestation::Controller::new(attester)); + s.spawn_bg(run_attestation_updater( + ctx, + &pool, + genesis, + attestation.clone(), + )); let executor = executor::Executor { config: config::executor(&cfg, &secrets)?, @@ -82,8 +81,7 @@ pub async fn run_main_node( replica_store: Box::new(store.clone()), payload_manager: Box::new(store.clone()), }), - attester, - attestation_status, + attestation, }; tracing::info!("running the main node executor"); @@ -91,3 +89,85 @@ pub async fn run_main_node( }) .await } + +/// Manages attestation state by configuring the +/// next batch to attest and storing the collected +/// certificates. 
+async fn run_attestation_updater( + ctx: &ctx::Ctx, + pool: &ConnectionPool, + genesis: validator::Genesis, + attestation: Arc<attestation::Controller>, +) -> anyhow::Result<()> { + const POLL_INTERVAL: time::Duration = time::Duration::seconds(5); + let res = async { + let Some(committee) = &genesis.attesters else { + return Ok(()); + }; + let committee = Arc::new(committee.clone()); + loop { + // After regenesis it might happen that the batch number for the first block + // is not immediately known (the first block was not produced yet), + // therefore we need to wait for it. + let status = loop { + match pool + .connection(ctx) + .await + .wrap("connection()")? + .attestation_status(ctx) + .await + .wrap("attestation_status()")? + { + Some(status) => break status, + None => ctx.sleep(POLL_INTERVAL).await?, + } + }; + tracing::info!( + "waiting for hash of batch {:?}", + status.next_batch_to_attest + ); + let hash = pool + .wait_for_batch_hash(ctx, status.next_batch_to_attest) + .await?; + tracing::info!( + "attesting batch {:?} with hash {hash:?}", + status.next_batch_to_attest + ); + attestation + .start_attestation(Arc::new(attestation::Info { + batch_to_attest: attester::Batch { + hash, + number: status.next_batch_to_attest, + genesis: status.genesis, + }, + committee: committee.clone(), + })) + .await + .context("start_attestation()")?; + // Main node is the only node which can update the global AttestationStatus, + // therefore we can synchronously wait for the certificate. + let qc = attestation + .wait_for_cert(ctx, status.next_batch_to_attest) + .await? + .context("attestation config has changed unexpectedly")?; + tracing::info!( + "collected certificate for batch {:?}", + status.next_batch_to_attest + ); + pool.connection(ctx) + .await + .wrap("connection()")? + .insert_batch_certificate(ctx, &qc) + .await + .map_err(|err| match err { + InsertCertificateError::Canceled(err) => ctx::Error::Canceled(err), + InsertCertificateError::Inner(err) => ctx::Error::Internal(err.into()), + })?; + } + } + .await; + match res { + Ok(()) | Err(ctx::Error::Canceled(_)) => Ok(()), + Err(ctx::Error::Internal(err)) => Err(err), + } +} diff --git a/core/node/consensus/src/storage/connection.rs b/core/node/consensus/src/storage/connection.rs index 0e2039ae6bc..6ff2fb1ce0a 100644 --- a/core/node/consensus/src/storage/connection.rs +++ b/core/node/consensus/src/storage/connection.rs @@ -27,6 +27,7 @@ impl ConnectionPool { } /// Waits for the `number` L2 block. + #[tracing::instrument(skip_all)] pub async fn wait_for_payload( &self, ctx: &ctx::Ctx, @@ -47,6 +48,29 @@ impl ConnectionPool { ctx.sleep(POLL_INTERVAL).await?; } } + + /// Waits for the `number` L1 batch hash. + #[tracing::instrument(skip_all)] + pub async fn wait_for_batch_hash( + &self, + ctx: &ctx::Ctx, + number: attester::BatchNumber, + ) -> ctx::Result<attester::BatchHash> { + const POLL_INTERVAL: time::Duration = time::Duration::milliseconds(500); + loop { + if let Some(hash) = self + .connection(ctx) + .await + .wrap("connection()")? + .batch_hash(ctx, number) + .await + .with_wrap(|| format!("batch_hash({number})"))? + { + return Ok(hash); + } + ctx.sleep(POLL_INTERVAL).await?; + } + } } /// Context-aware `zksync_dal::Connection` wrapper. @@ -321,29 +345,6 @@ impl<'a> Connection<'a> { .map(|nr| attester::BatchNumber(nr.0 as u64))) } - /// Wrapper for `consensus_dal().get_last_batch_certificate_number()`.
- pub async fn get_last_batch_certificate_number( - &mut self, - ctx: &ctx::Ctx, - ) -> ctx::Result<Option<attester::BatchNumber>> { - Ok(ctx - .wait(self.0.consensus_dal().get_last_batch_certificate_number()) - .await? - .context("get_last_batch_certificate_number()")?) - } - - /// Wrapper for `consensus_dal().batch_certificate()`. - pub async fn batch_certificate( - &mut self, - ctx: &ctx::Ctx, - number: attester::BatchNumber, - ) -> ctx::Result<Option<attester::BatchQC>> { - Ok(ctx - .wait(self.0.consensus_dal().batch_certificate(number)) - .await? - .context("batch_certificate()")?) - } - /// Wrapper for `blocks_dal().get_l2_block_range_of_l1_batch()`. pub async fn get_l2_block_range_of_l1_batch( &mut self, diff --git a/core/node/consensus/src/storage/store.rs b/core/node/consensus/src/storage/store.rs index 0e08811c237..6a96812ae40 100644 --- a/core/node/consensus/src/storage/store.rs +++ b/core/node/consensus/src/storage/store.rs @@ -57,8 +57,6 @@ pub(crate) struct Store { block_payloads: Arc<sync::Mutex<Option<PayloadQueue>>>, /// L2 block QCs received from consensus block_certificates: ctx::channel::UnboundedSender<validator::CommitQC>, - /// L1 batch QCs received from consensus - batch_certificates: ctx::channel::UnboundedSender<attester::BatchQC>, /// Range of L2 blocks for which we have a QC persisted. blocks_persisted: sync::watch::Receiver<storage::BlockStoreState>, /// Range of L1 batches we have persisted. @@ -73,7 +71,6 @@ pub struct StoreRunner { blocks_persisted: PersistedBlockState, batches_persisted: sync::watch::Sender<storage::BatchStoreState>, block_certificates: ctx::channel::UnboundedReceiver<validator::CommitQC>, - batch_certificates: ctx::channel::UnboundedReceiver<attester::BatchQC>, } impl Store { @@ -98,13 +95,11 @@ impl Store { let blocks_persisted = sync::watch::channel(blocks_persisted).0; let batches_persisted = sync::watch::channel(batches_persisted).0; let (block_certs_send, block_certs_recv) = ctx::channel::unbounded(); - let (batch_certs_send, batch_certs_recv) = ctx::channel::unbounded(); Ok(( Store { pool: pool.clone(), block_certificates: block_certs_send, - batch_certificates: batch_certs_send, block_payloads: Arc::new(sync::Mutex::new(payload_queue)), blocks_persisted: blocks_persisted.subscribe(), batches_persisted: batches_persisted.subscribe(), @@ -114,7 +109,6 @@ blocks_persisted: PersistedBlockState(blocks_persisted), batches_persisted, block_certificates: block_certs_recv, - batch_certificates: batch_certs_recv, }, )) } @@ -171,7 +165,6 @@ impl StoreRunner { blocks_persisted, batches_persisted, mut block_certificates, - mut batch_certificates, } = self; let res = scope::run!(ctx, |ctx, s| async { @@ -256,60 +249,6 @@ } }); - #[tracing::instrument(skip_all)] - async fn insert_batch_certificates_iteration( - ctx: &ctx::Ctx, - pool: &ConnectionPool, - batch_certificates: &mut ctx::channel::UnboundedReceiver<attester::BatchQC>, - ) -> ctx::Result<()> { - const POLL_INTERVAL: time::Duration = time::Duration::milliseconds(50); - let cert = batch_certificates - .recv(ctx) - .instrument(tracing::info_span!("wait_for_batch_certificate")) - .await?; - loop { - use consensus_dal::InsertCertificateError as E; - // Try to insert the cert. - let res = pool - .connection(ctx) - .await? - .insert_batch_certificate(ctx, &cert) - .await; - match res { - Ok(()) => { - break; - } - Err(InsertCertificateError::Inner(E::MissingPayload)) => { - // The L1 batch isn't available yet. - // We can wait until it's produced/received, or we could modify gossip - // so that we don't even accept votes until we have the corresponding batch.
- ctx.sleep(POLL_INTERVAL) - .instrument(tracing::info_span!("wait_for_batch")) - .await?; - } - Err(InsertCertificateError::Inner(err)) => { - return Err(ctx::Error::Internal(anyhow::Error::from(err))) - } - Err(InsertCertificateError::Canceled(err)) => { - return Err(ctx::Error::Canceled(err)) - } - } - } - - Ok(()) - } - - s.spawn::<()>(async { - // Loop inserting batch certificates into storage - loop { - insert_batch_certificates_iteration(ctx, &pool, &mut batch_certificates) - .await?; - } - }); - #[tracing::instrument(skip_all)] async fn insert_block_certificates_iteration( ctx: &ctx::Ctx, @@ -523,39 +462,6 @@ impl storage::PersistentBatchStore for Store { self.batches_persisted.clone() } - /// Get the next L1 batch number which has to be signed by attesters. - async fn next_batch_to_attest( - &self, - ctx: &ctx::Ctx, - ) -> ctx::Result<Option<attester::BatchNumber>> { - Ok(self - .conn(ctx) - .await? - .attestation_status(ctx) - .await - .wrap("next_batch_to_attest")? - .map(|s| s.next_batch_to_attest)) - } - - /// Get the L1 batch QC from storage with the highest number. - /// - /// This might have gaps before it. Until there is a way to catch up with missing - /// certificates by fetching from the main node, returning the last inserted one - /// is the best we can do. - async fn last_batch_qc(&self, ctx: &ctx::Ctx) -> ctx::Result<Option<attester::BatchQC>> { - let Some(number) = self - .conn(ctx) - .await? - .get_last_batch_certificate_number(ctx) - .await - .wrap("get_last_batch_certificate_number")? - else { - return Ok(None); - }; - - self.get_batch_qc(ctx, number).await - } - /// Returns the batch with the given number. async fn get_batch( &self, ctx: &ctx::Ctx, number: attester::BatchNumber, ) -> ctx::Result<Option<attester::SyncBatch>> { self.conn(ctx) .await? .batch(ctx, number) .await .wrap("get_batch") } - /// Returns the [attester::Batch] with the given number, which is the `message` that - /// appears in [attester::BatchQC], and represents the content that needs to be signed - /// by the attesters. - async fn get_batch_to_sign( - &self, - ctx: &ctx::Ctx, - number: attester::BatchNumber, - ) -> ctx::Result<Option<attester::Batch>> { - let mut conn = self.conn(ctx).await?; - - let Some(hash) = conn.batch_hash(ctx, number).await.wrap("batch_hash()")? else { - return Ok(None); - }; - - let Some(genesis) = conn.genesis(ctx).await.wrap("genesis()")? else { - return Ok(None); - }; - - Ok(Some(attester::Batch { - number, - hash, - genesis: genesis.hash(), - })) - } - - /// Returns the QC of the batch with the given number. - async fn get_batch_qc( - &self, - ctx: &ctx::Ctx, - number: attester::BatchNumber, - ) -> ctx::Result<Option<attester::BatchQC>> { - self.conn(ctx) - .await? - .batch_certificate(ctx, number) - .await - .wrap("batch_certificate") - } - - /// Store the given QC in the storage. - /// - /// Storing a QC is allowed even if it creates a gap in the L1 batch history. - /// If we need the last batch QC that still needs to be signed then the queries need to look for gaps. - async fn store_qc(&self, _ctx: &ctx::Ctx, qc: attester::BatchQC) -> ctx::Result<()> { - // Storing asynchronously because we might get the QC before the L1 batch itself. - self.batch_certificates.send(qc); - Ok(()) - } - /// Queue the batch to be persisted in storage. /// /// The caller [BatchStore] ensures that this is only called when the batch is the next expected one.
diff --git a/core/node/consensus/src/storage/testonly.rs b/core/node/consensus/src/storage/testonly.rs index c73d20982c1..5d1279afbbf 100644 --- a/core/node/consensus/src/storage/testonly.rs +++ b/core/node/consensus/src/storage/testonly.rs @@ -2,7 +2,7 @@ use anyhow::Context as _; use zksync_concurrency::{ctx, error::Wrap as _, time}; -use zksync_consensus_roles::validator; +use zksync_consensus_roles::{attester, validator}; use zksync_contracts::BaseSystemContracts; use zksync_dal::CoreDal as _; use zksync_node_genesis::{insert_genesis_batch, mock_genesis_config, GenesisParams}; @@ -12,7 +12,41 @@ use zksync_types::{ system_contracts::get_system_smart_contracts, L1BatchNumber, L2BlockNumber, ProtocolVersionId, }; -use super::ConnectionPool; +use super::{Connection, ConnectionPool}; + +impl Connection<'_> { + /// Wrapper for `consensus_dal().batch_of_block()`. + pub async fn batch_of_block( + &mut self, + ctx: &ctx::Ctx, + block: validator::BlockNumber, + ) -> ctx::Result<Option<attester::BatchNumber>> { + Ok(ctx + .wait(self.0.consensus_dal().batch_of_block(block)) + .await??) + } + + /// Wrapper for `consensus_dal().last_batch_certificate_number()`. + pub async fn last_batch_certificate_number( + &mut self, + ctx: &ctx::Ctx, + ) -> ctx::Result<Option<attester::BatchNumber>> { + Ok(ctx + .wait(self.0.consensus_dal().last_batch_certificate_number()) + .await??) + } + + /// Wrapper for `consensus_dal().batch_certificate()`. + pub async fn batch_certificate( + &mut self, + ctx: &ctx::Ctx, + number: attester::BatchNumber, + ) -> ctx::Result<Option<attester::BatchQC>> { + Ok(ctx + .wait(self.0.consensus_dal().batch_certificate(number)) + .await??) + } +} pub(crate) fn mock_genesis_params(protocol_version: ProtocolVersionId) -> GenesisParams { let mut cfg = mock_genesis_config(); @@ -161,6 +195,57 @@ impl ConnectionPool { Ok(blocks) } + pub async fn wait_for_batch_certificates_and_verify( + &self, + ctx: &ctx::Ctx, + want_last: attester::BatchNumber, + ) -> ctx::Result<()> { + // Wait for the last batch to be attested. + const POLL_INTERVAL: time::Duration = time::Duration::milliseconds(100); + while self + .connection(ctx) + .await + .wrap("connection()")? + .last_batch_certificate_number(ctx) + .await + .wrap("last_batch_certificate_number()")? + .map_or(true, |got| got < want_last) + { + ctx.sleep(POLL_INTERVAL).await?; + } + let mut conn = self.connection(ctx).await.wrap("connection()")?; + let genesis = conn + .genesis(ctx) + .await + .wrap("genesis()")? + .context("genesis is missing")?; + let first = conn + .batch_of_block(ctx, genesis.first_block) + .await + .wrap("batch_of_block()")? + .context("batch of first_block is missing")?; + let committee = genesis.attesters.as_ref().unwrap(); + for i in first.0..want_last.0 { + let i = attester::BatchNumber(i); + let hash = conn + .batch_hash(ctx, i) + .await + .wrap("batch_hash()")? + .context("hash missing")?; + let cert = conn + .batch_certificate(ctx, i) + .await + .wrap("batch_certificate")?
+ .context("cert missing")?; + if cert.message.hash != hash { + return Err(anyhow::format_err!("cert[{i:?}]: hash mismatch").into()); + } + cert.verify(genesis.hash(), committee) + .context("cert[{i:?}].verify()")?; + } + Ok(()) + } + pub async fn prune_batches( &self, ctx: &ctx::Ctx, diff --git a/core/node/consensus/src/testonly.rs b/core/node/consensus/src/testonly.rs index 9cf06b992e8..0537aaabc56 100644 --- a/core/node/consensus/src/testonly.rs +++ b/core/node/consensus/src/testonly.rs @@ -14,7 +14,7 @@ use zksync_config::{ }; use zksync_consensus_crypto::TextFmt as _; use zksync_consensus_network as network; -use zksync_consensus_roles::validator; +use zksync_consensus_roles::{attester, validator, validator::testonly::Setup}; use zksync_dal::{CoreDal, DalError}; use zksync_l1_contract_interface::i_executor::structures::StoredBatchInfo; use zksync_metadata_calculator::{ @@ -72,55 +72,105 @@ pub(super) struct StateKeeper { tree_reader: LazyAsyncTreeReader, } -pub(super) fn config(cfg: &network::Config) -> (config::ConsensusConfig, config::ConsensusSecrets) { - ( - config::ConsensusConfig { - server_addr: *cfg.server_addr, - public_addr: config::Host(cfg.public_addr.0.clone()), - max_payload_size: usize::MAX, - max_batch_size: usize::MAX, - gossip_dynamic_inbound_limit: cfg.gossip.dynamic_inbound_limit, - gossip_static_inbound: cfg - .gossip - .static_inbound - .iter() - .map(|k| config::NodePublicKey(k.encode())) - .collect(), - gossip_static_outbound: cfg - .gossip - .static_outbound - .iter() - .map(|(k, v)| (config::NodePublicKey(k.encode()), config::Host(v.0.clone()))) - .collect(), - genesis_spec: cfg.validator_key.as_ref().map(|key| config::GenesisSpec { - chain_id: L2ChainId::default(), - protocol_version: config::ProtocolVersion(validator::ProtocolVersion::CURRENT.0), - validators: vec![config::WeightedValidator { - key: config::ValidatorPublicKey(key.public().encode()), - weight: 1, - }], - // We only have access to the main node attester key in the `cfg`, which is fine - // for validators because at the moment there is only one leader. It doesn't - // allow us to form a full attester committee. However in the current tests - // the `new_configs` used to produce the array of `network::Config` doesn't - // assign an attester key, so it doesn't matter. 
- attesters: Vec::new(), - leader: config::ValidatorPublicKey(key.public().encode()), - }), - rpc: None, - }, - config::ConsensusSecrets { - node_key: Some(config::NodeSecretKey(cfg.gossip.key.encode().into())), - validator_key: cfg - .validator_key - .as_ref() - .map(|k| config::ValidatorSecretKey(k.encode().into())), - attester_key: cfg - .attester_key - .as_ref() - .map(|k| config::AttesterSecretKey(k.encode().into())), - }, - ) +#[derive(Clone)] +pub(super) struct ConfigSet { + net: network::Config, + pub(super) config: config::ConsensusConfig, + pub(super) secrets: config::ConsensusSecrets, +} + +impl ConfigSet { + pub(super) fn new_fullnode(&self, rng: &mut impl Rng) -> ConfigSet { + let net = network::testonly::new_fullnode(rng, &self.net); + ConfigSet { + config: make_config(&net, None), + secrets: make_secrets(&net, None), + net, + } + } +} + +pub(super) fn new_configs( + rng: &mut impl Rng, + setup: &Setup, + gossip_peers: usize, +) -> Vec<ConfigSet> { + let genesis_spec = config::GenesisSpec { + chain_id: setup.genesis.chain_id.0.try_into().unwrap(), + protocol_version: config::ProtocolVersion(setup.genesis.protocol_version.0), + validators: setup + .validator_keys + .iter() + .map(|k| config::WeightedValidator { + key: config::ValidatorPublicKey(k.public().encode()), + weight: 1, + }) + .collect(), + attesters: setup + .attester_keys + .iter() + .map(|k| config::WeightedAttester { + key: config::AttesterPublicKey(k.public().encode()), + weight: 1, + }) + .collect(), + leader: config::ValidatorPublicKey(setup.validator_keys[0].public().encode()), + }; + network::testonly::new_configs(rng, setup, gossip_peers) + .into_iter() + .enumerate() + .map(|(i, net)| ConfigSet { + config: make_config(&net, Some(genesis_spec.clone())), + secrets: make_secrets(&net, setup.attester_keys.get(i).cloned()), + net, + }) + .collect() +} + +fn make_secrets( + cfg: &network::Config, + attester_key: Option<attester::SecretKey>, +) -> config::ConsensusSecrets { + config::ConsensusSecrets { + node_key: Some(config::NodeSecretKey(cfg.gossip.key.encode().into())), + validator_key: cfg + .validator_key + .as_ref() + .map(|k| config::ValidatorSecretKey(k.encode().into())), + attester_key: attester_key.map(|k| config::AttesterSecretKey(k.encode().into())), + } +} + +fn make_config( + cfg: &network::Config, + genesis_spec: Option<config::GenesisSpec>, +) -> config::ConsensusConfig { + config::ConsensusConfig { + server_addr: *cfg.server_addr, + public_addr: config::Host(cfg.public_addr.0.clone()), + max_payload_size: usize::MAX, + max_batch_size: usize::MAX, + gossip_dynamic_inbound_limit: cfg.gossip.dynamic_inbound_limit, + gossip_static_inbound: cfg + .gossip + .static_inbound + .iter() + .map(|k| config::NodePublicKey(k.encode())) + .collect(), + gossip_static_outbound: cfg + .gossip + .static_outbound + .iter() + .map(|(k, v)| (config::NodePublicKey(k.encode()), config::Host(v.0.clone()))) + .collect(), + // This is only relevant for the main node, which populates the genesis on the first run. + // Note that the spec doesn't match 100% the genesis provided. + // That's because not all genesis setups are currently supported in zksync-era. + // TODO: this might be misleading, so it would be better to write some more custom + // genesis generator for zksync-era tests. + genesis_spec, + rpc: None, + } } /// Fake StateKeeper task to be executed in the background.
@@ -393,15 +443,14 @@ impl StateKeeper { self, ctx: &ctx::Ctx, client: Box<DynClient<L2>>, - cfg: &network::Config, + cfgs: ConfigSet, ) -> anyhow::Result<()> { - let (cfg, secrets) = config(cfg); en::EN { pool: self.pool, client, sync_state: self.sync_state.clone(), } - .run(ctx, self.actions_sender, cfg, secrets) + .run(ctx, self.actions_sender, cfgs.config, cfgs.secrets) .await } } diff --git a/core/node/consensus/src/tests/attestation.rs b/core/node/consensus/src/tests/attestation.rs new file mode 100644 index 00000000000..b245d0524aa --- /dev/null +++ b/core/node/consensus/src/tests/attestation.rs @@ -0,0 +1,166 @@ +use anyhow::Context as _; +use test_casing::{test_casing, Product}; +use tracing::Instrument as _; +use zksync_concurrency::{ctx, error::Wrap, scope}; +use zksync_consensus_roles::{ + attester, + validator::testonly::{Setup, SetupSpec}, +}; +use zksync_dal::consensus_dal::AttestationStatus; +use zksync_node_sync::MainNodeClient; +use zksync_types::{L1BatchNumber, ProtocolVersionId}; + +use super::{FROM_SNAPSHOT, VERSIONS}; +use crate::{mn::run_main_node, storage::ConnectionPool, testonly}; + +#[test_casing(2, VERSIONS)] +#[tokio::test] +async fn test_attestation_status_api(version: ProtocolVersionId) { + zksync_concurrency::testonly::abort_on_panic(); + let ctx = &ctx::test_root(&ctx::RealClock); + let rng = &mut ctx.rng(); + + scope::run!(ctx, |ctx, s| async { + let pool = ConnectionPool::test(false, version).await; + let (mut sk, runner) = testonly::StateKeeper::new(ctx, pool.clone()).await?; + s.spawn_bg(runner.run(ctx).instrument(tracing::info_span!("validator"))); + + // Setup nontrivial genesis. + while sk.last_sealed_batch() < L1BatchNumber(3) { + sk.push_random_blocks(rng, 10).await; + } + let mut setup = SetupSpec::new(rng, 3); + setup.first_block = sk.last_block(); + let first_batch = sk.last_batch(); + let setup = Setup::from(setup); + let mut conn = pool.connection(ctx).await.wrap("connection()")?; + conn.try_update_genesis(ctx, &setup.genesis) + .await + .wrap("try_update_genesis()")?; + // Make sure that the first_batch is actually sealed. + sk.seal_batch().await; + pool.wait_for_batch(ctx, first_batch).await?; + + // Connect to API endpoint. + let api = sk.connect(ctx).await?; + let fetch_status = || async { + let s = api + .fetch_attestation_status() + .await? + .context("no attestation_status")?; + let s: AttestationStatus = + zksync_protobuf::serde::deserialize(&s.0).context("deserialize()")?; + anyhow::ensure!(s.genesis == setup.genesis.hash(), "genesis hash mismatch"); + Ok(s) + }; + + // If the main node has no L1 batch certificates, + // then the first one to sign should be the batch with the `genesis.first_block`. + let status = fetch_status().await?; + assert_eq!( + status.next_batch_to_attest, + attester::BatchNumber(first_batch.0.into()) + ); + + // Insert a (fake) cert, then check again.
+ { + let mut conn = pool.connection(ctx).await?; + let number = status.next_batch_to_attest; + let hash = conn.batch_hash(ctx, number).await?.unwrap(); + let genesis = conn.genesis(ctx).await?.unwrap().hash(); + let cert = attester::BatchQC { + signatures: attester::MultiSig::default(), + message: attester::Batch { + number, + hash, + genesis, + }, + }; + conn.insert_batch_certificate(ctx, &cert) + .await + .context("insert_batch_certificate()")?; + } + let want = status.next_batch_to_attest.next(); + let got = fetch_status().await?; + assert_eq!(want, got.next_batch_to_attest); + + Ok(()) + }) + .await + .unwrap(); +} + +// Test running a couple of attesters (which are also validators). +// Main node is expected to collect all certificates. +// External nodes are expected to just vote for the batch. +// +// TODO: it would be nice to use `StateKeeperRunner::run_real()` in this test, +// however as of now it doesn't work with ENs and it doesn't work with +// `ConnectionPool::from_snapshot`. +#[test_casing(4, Product((FROM_SNAPSHOT,VERSIONS)))] +#[tokio::test] +async fn test_multiple_attesters(from_snapshot: bool, version: ProtocolVersionId) { + const NODES: usize = 4; + + zksync_concurrency::testonly::abort_on_panic(); + let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); + let rng = &mut ctx.rng(); + let setup = Setup::new(rng, 4); + let cfgs = testonly::new_configs(rng, &setup, NODES); + + scope::run!(ctx, |ctx, s| async { + let validator_pool = ConnectionPool::test(from_snapshot, version).await; + let (mut validator, runner) = + testonly::StateKeeper::new(ctx, validator_pool.clone()).await?; + s.spawn_bg(async { + runner + .run(ctx) + .instrument(tracing::info_span!("validator")) + .await + .context("validator") + }); + // API server needs at least 1 L1 batch to start. 
+ validator.seal_batch().await; + validator_pool + .wait_for_payload(ctx, validator.last_block()) + .await?; + + tracing::info!("Run validator."); + s.spawn_bg(run_main_node( + ctx, + cfgs[0].config.clone(), + cfgs[0].secrets.clone(), + validator_pool.clone(), + )); + + tracing::info!("Run nodes."); + let mut node_pools = vec![]; + for (i, cfg) in cfgs[1..].iter().enumerate() { + let i = ctx::NoCopy(i); + let pool = ConnectionPool::test(from_snapshot, version).await; + let (node, runner) = testonly::StateKeeper::new(ctx, pool.clone()).await?; + node_pools.push(pool.clone()); + s.spawn_bg(async { + let i = i; + runner + .run(ctx) + .instrument(tracing::info_span!("node", i = *i)) + .await + .with_context(|| format!("node{}", *i)) + }); + s.spawn_bg(node.run_consensus(ctx, validator.connect(ctx).await?, cfg.clone())); + } + + tracing::info!("Create some batches"); + validator.push_random_blocks(rng, 20).await; + validator.seal_batch().await; + tracing::info!("Wait for the batches to be attested"); + let want_last = attester::BatchNumber(validator.last_sealed_batch().0.into()); + validator_pool + .wait_for_batch_certificates_and_verify(ctx, want_last) + .await?; + Ok(()) + }) + .await + .unwrap(); +} diff --git a/core/node/consensus/src/tests/batch.rs b/core/node/consensus/src/tests/batch.rs new file mode 100644 index 00000000000..41d73fdb87c --- /dev/null +++ b/core/node/consensus/src/tests/batch.rs @@ -0,0 +1,120 @@ +use test_casing::{test_casing, Product}; +use zksync_concurrency::{ctx, scope}; +use zksync_consensus_roles::validator; +use zksync_types::{L1BatchNumber, ProtocolVersionId}; + +use super::{FROM_SNAPSHOT, VERSIONS}; +use crate::{storage::ConnectionPool, testonly}; + +#[test_casing(4, Product((FROM_SNAPSHOT,VERSIONS)))] +#[tokio::test] +async fn test_connection_get_batch(from_snapshot: bool, version: ProtocolVersionId) { + zksync_concurrency::testonly::abort_on_panic(); + let ctx = &ctx::test_root(&ctx::RealClock); + let rng = &mut ctx.rng(); + let pool = ConnectionPool::test(from_snapshot, version).await; + + // Fill storage with unsigned L2 blocks and L1 batches in a way that the + // last L1 batch is guaranteed to have some L2 blocks executed in it. + scope::run!(ctx, |ctx, s| async { + // Start state keeper. + let (mut sk, runner) = testonly::StateKeeper::new(ctx, pool.clone()).await?; + s.spawn_bg(runner.run(ctx)); + + for _ in 0..3 { + for _ in 0..2 { + sk.push_random_block(rng).await; + } + sk.seal_batch().await; + } + sk.push_random_block(rng).await; + + pool.wait_for_payload(ctx, sk.last_block()).await?; + + Ok(()) + }) + .await + .unwrap(); + + // Now we can try to retrieve the batch. + scope::run!(ctx, |ctx, _s| async { + let mut conn = pool.connection(ctx).await?; + let batches = conn.batches_range(ctx).await?; + let last = batches.last.expect("last is set"); + let (min, max) = conn + .get_l2_block_range_of_l1_batch(ctx, last) + .await? + .unwrap(); + + let last_batch = conn + .get_batch(ctx, last) + .await? 
+ .expect("last batch can be retrieved"); + + assert_eq!( + last_batch.payloads.len(), + (max.0 - min.0) as usize, + "all block payloads present" + ); + + let first_payload = last_batch + .payloads + .first() + .expect("last batch has payloads"); + + let want_payload = conn.payload(ctx, min).await?.expect("payload is in the DB"); + let want_payload = want_payload.encode(); + + assert_eq!( + first_payload, &want_payload, + "first payload is the right number" + ); + + anyhow::Ok(()) + }) + .await + .unwrap(); +} + +/// Tests that generated L1 batch witnesses can be verified successfully. +/// TODO: add tests for verification failures. +#[test_casing(2, VERSIONS)] +#[tokio::test] +async fn test_batch_witness(version: ProtocolVersionId) { + zksync_concurrency::testonly::abort_on_panic(); + let ctx = &ctx::test_root(&ctx::RealClock); + let rng = &mut ctx.rng(); + + scope::run!(ctx, |ctx, s| async { + let pool = ConnectionPool::from_genesis(version).await; + let (mut node, runner) = testonly::StateKeeper::new(ctx, pool.clone()).await?; + s.spawn_bg(runner.run_real(ctx)); + + tracing::info!("analyzing storage"); + { + let mut conn = pool.connection(ctx).await.unwrap(); + let mut n = validator::BlockNumber(0); + while let Some(p) = conn.payload(ctx, n).await? { + tracing::info!("block[{n}] = {p:?}"); + n = n + 1; + } + } + + // Seal a bunch of batches. + node.push_random_blocks(rng, 10).await; + node.seal_batch().await; + pool.wait_for_batch(ctx, node.last_sealed_batch()).await?; + // We can verify only 2nd batch onward, because + // batch witness verifies parent of the last block of the + // previous batch (and 0th batch contains only 1 block). + for n in 2..=node.last_sealed_batch().0 { + let n = L1BatchNumber(n); + let batch_with_witness = node.load_batch_with_witness(ctx, n).await?; + let commit = node.load_batch_commit(ctx, n).await?; + batch_with_witness.verify(&commit)?; + } + Ok(()) + }) + .await + .unwrap(); +} diff --git a/core/node/consensus/src/tests.rs b/core/node/consensus/src/tests/mod.rs similarity index 69% rename from core/node/consensus/src/tests.rs rename to core/node/consensus/src/tests/mod.rs index 8e1594393ea..0b611d55f06 100644 --- a/core/node/consensus/src/tests.rs +++ b/core/node/consensus/src/tests/mod.rs @@ -2,17 +2,12 @@ use anyhow::Context as _; use test_casing::{test_casing, Product}; use tracing::Instrument as _; use zksync_concurrency::{ctx, error::Wrap, scope}; -use zksync_config::configs::consensus::{ValidatorPublicKey, WeightedValidator}; -use zksync_consensus_crypto::TextFmt as _; -use zksync_consensus_network::testonly::{new_configs, new_fullnode}; use zksync_consensus_roles::{ - attester, validator, + validator, validator::testonly::{Setup, SetupSpec}, }; use zksync_consensus_storage::BlockStore; -use zksync_dal::consensus_dal::AttestationStatus; -use zksync_node_sync::MainNodeClient; -use zksync_types::{L1BatchNumber, ProtocolVersionId}; +use zksync_types::ProtocolVersionId; use crate::{ mn::run_main_node, @@ -20,6 +15,9 @@ use crate::{ testonly, }; +mod attestation; +mod batch; + const VERSIONS: [ProtocolVersionId; 2] = [ProtocolVersionId::latest(), ProtocolVersionId::next()]; const FROM_SNAPSHOT: [bool; 2] = [true, false]; @@ -86,76 +84,6 @@ async fn test_validator_block_store(version: ProtocolVersionId) { } } -#[test_casing(4, Product((FROM_SNAPSHOT,VERSIONS)))] -#[tokio::test] -async fn test_connection_get_batch(from_snapshot: bool, version: ProtocolVersionId) { - zksync_concurrency::testonly::abort_on_panic(); - let ctx = 
&ctx::test_root(&ctx::RealClock); - let rng = &mut ctx.rng(); - let pool = ConnectionPool::test(from_snapshot, version).await; - - // Fill storage with unsigned L2 blocks and L1 batches in a way that the - // last L1 batch is guaranteed to have some L2 blocks executed in it. - scope::run!(ctx, |ctx, s| async { - // Start state keeper. - let (mut sk, runner) = testonly::StateKeeper::new(ctx, pool.clone()).await?; - s.spawn_bg(runner.run(ctx)); - - for _ in 0..3 { - for _ in 0..2 { - sk.push_random_block(rng).await; - } - sk.seal_batch().await; - } - sk.push_random_block(rng).await; - - pool.wait_for_payload(ctx, sk.last_block()).await?; - - Ok(()) - }) - .await - .unwrap(); - - // Now we can try to retrieve the batch. - scope::run!(ctx, |ctx, _s| async { - let mut conn = pool.connection(ctx).await?; - let batches = conn.batches_range(ctx).await?; - let last = batches.last.expect("last is set"); - let (min, max) = conn - .get_l2_block_range_of_l1_batch(ctx, last) - .await? - .unwrap(); - - let last_batch = conn - .get_batch(ctx, last) - .await? - .expect("last batch can be retrieved"); - - assert_eq!( - last_batch.payloads.len(), - (max.0 - min.0) as usize, - "all block payloads present" - ); - - let first_payload = last_batch - .payloads - .first() - .expect("last batch has payloads"); - - let want_payload = conn.payload(ctx, min).await?.expect("payload is in the DB"); - let want_payload = want_payload.encode(); - - assert_eq!( - first_payload, &want_payload, - "first payload is the right number" - ); - - anyhow::Ok(()) - }) - .await - .unwrap(); -} - // In the current implementation, consensus certificates are created asynchronously // for the L2 blocks constructed by the StateKeeper. This means that consensus actor // is effectively just back filling the consensus certificates for the L2 blocks in storage. @@ -166,7 +94,7 @@ async fn test_validator(from_snapshot: bool, version: ProtocolVersionId) { let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); let rng = &mut ctx.rng(); let setup = Setup::new(rng, 1); - let cfgs = new_configs(rng, &setup, 0); + let cfg = testonly::new_configs(rng, &setup, 0)[0].clone(); scope::run!(ctx, |ctx, s| async { tracing::info!("Start state keeper."); @@ -187,8 +115,7 @@ async fn test_validator(from_snapshot: bool, version: ProtocolVersionId) { scope::run!(ctx, |ctx, s| async { tracing::info!("Start consensus actor"); // In the first iteration it will initialize genesis. 
- let (cfg,secrets) = testonly::config(&cfgs[0]); - s.spawn_bg(run_main_node(ctx, cfg, secrets, pool.clone())); + s.spawn_bg(run_main_node(ctx, cfg.config.clone(), cfg.secrets.clone(), pool.clone())); tracing::info!("Generate couple more blocks and wait for consensus to catch up."); sk.push_random_blocks(rng, 3).await; @@ -230,7 +157,7 @@ async fn test_nodes_from_various_snapshots(version: ProtocolVersionId) { let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); let rng = &mut ctx.rng(); let setup = Setup::new(rng, 1); - let validator_cfg = new_configs(rng, &setup, 0).pop().unwrap(); + let validator_cfg = testonly::new_configs(rng, &setup, 0)[0].clone(); scope::run!(ctx, |ctx, s| async { tracing::info!("spawn validator"); @@ -238,8 +165,12 @@ async fn test_nodes_from_various_snapshots(version: ProtocolVersionId) { let (mut validator, runner) = testonly::StateKeeper::new(ctx, validator_pool.clone()).await?; s.spawn_bg(runner.run(ctx).instrument(tracing::info_span!("validator"))); - let (cfg, secrets) = testonly::config(&validator_cfg); - s.spawn_bg(run_main_node(ctx, cfg, secrets, validator_pool.clone())); + s.spawn_bg(run_main_node( + ctx, + validator_cfg.config.clone(), + validator_cfg.secrets.clone(), + validator_pool.clone(), + )); tracing::info!("produce some batches"); validator.push_random_blocks(rng, 5).await; @@ -255,8 +186,8 @@ async fn test_nodes_from_various_snapshots(version: ProtocolVersionId) { s.spawn_bg(runner.run(ctx).instrument(tracing::info_span!("node1"))); let conn = validator.connect(ctx).await?; s.spawn_bg(async { - let cfg = new_fullnode(&mut ctx.rng(), &validator_cfg); - node.run_consensus(ctx, conn, &cfg).await + let cfg = validator_cfg.new_fullnode(&mut ctx.rng()); + node.run_consensus(ctx, conn, cfg).await }); tracing::info!("produce more batches"); @@ -273,8 +204,8 @@ async fn test_nodes_from_various_snapshots(version: ProtocolVersionId) { s.spawn_bg(runner.run(ctx).instrument(tracing::info_span!("node2"))); let conn = validator.connect(ctx).await?; s.spawn_bg(async { - let cfg = new_fullnode(&mut ctx.rng(), &validator_cfg); - node.run_consensus(ctx, conn, &cfg).await + let cfg = validator_cfg.new_fullnode(&mut ctx.rng()); + node.run_consensus(ctx, conn, cfg).await }); tracing::info!("produce more blocks and compare storages"); @@ -311,16 +242,13 @@ async fn test_full_nodes(from_snapshot: bool, version: ProtocolVersionId) { let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); let rng = &mut ctx.rng(); let setup = Setup::new(rng, 1); - let validator_cfgs = new_configs(rng, &setup, 0); + let validator_cfg = testonly::new_configs(rng, &setup, 0)[0].clone(); // topology: // validator <-> node <-> node <-> ... let mut node_cfgs = vec![]; for _ in 0..NODES { - node_cfgs.push(new_fullnode( - rng, - node_cfgs.last().unwrap_or(&validator_cfgs[0]), - )); + node_cfgs.push(node_cfgs.last().unwrap_or(&validator_cfg).new_fullnode(rng)); } // Run validator and fetchers in parallel. 
@@ -344,8 +272,12 @@ async fn test_full_nodes(from_snapshot: bool, version: ProtocolVersionId) { .await?; tracing::info!("Run validator."); - let (cfg, secrets) = testonly::config(&validator_cfgs[0]); - s.spawn_bg(run_main_node(ctx, cfg, secrets, validator_pool.clone())); + s.spawn_bg(run_main_node( + ctx, + validator_cfg.config.clone(), + validator_cfg.secrets.clone(), + validator_pool.clone(), + )); tracing::info!("Run nodes."); let mut node_pools = vec![]; @@ -362,7 +294,7 @@ async fn test_full_nodes(from_snapshot: bool, version: ProtocolVersionId) { .await .with_context(|| format!("node{}", *i)) }); - s.spawn_bg(node.run_consensus(ctx, validator.connect(ctx).await?, cfg)); + s.spawn_bg(node.run_consensus(ctx, validator.connect(ctx).await?, cfg.clone())); } tracing::info!("Make validator produce blocks and wait for fetchers to get them."); @@ -395,7 +327,7 @@ async fn test_en_validators(from_snapshot: bool, version: ProtocolVersionId) { let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); let rng = &mut ctx.rng(); let setup = Setup::new(rng, NODES); - let cfgs = new_configs(rng, &setup, 1); + let cfgs = testonly::new_configs(rng, &setup, 1); // Run all nodes in parallel. scope::run!(ctx, |ctx, s| async { @@ -423,16 +355,12 @@ async fn test_en_validators(from_snapshot: bool, version: ProtocolVersionId) { main_node.connect(ctx).await?; tracing::info!("Run main node with all nodes being validators."); - let (mut cfg, secrets) = testonly::config(&cfgs[0]); - cfg.genesis_spec.as_mut().unwrap().validators = setup - .validator_keys - .iter() - .map(|k| WeightedValidator { - key: ValidatorPublicKey(k.public().encode()), - weight: 1, - }) - .collect(); - s.spawn_bg(run_main_node(ctx, cfg, secrets, main_node_pool.clone())); + s.spawn_bg(run_main_node( + ctx, + cfgs[0].config.clone(), + cfgs[0].secrets.clone(), + main_node_pool.clone(), + )); tracing::info!("Run external nodes."); let mut ext_node_pools = vec![]; @@ -449,7 +377,7 @@ async fn test_en_validators(from_snapshot: bool, version: ProtocolVersionId) { .await .with_context(|| format!("en{}", *i)) }); - s.spawn_bg(ext_node.run_consensus(ctx, main_node.connect(ctx).await?, cfg)); + s.spawn_bg(ext_node.run_consensus(ctx, main_node.connect(ctx).await?, cfg.clone())); } tracing::info!("Make the main node produce blocks and wait for consensus to finalize them"); @@ -479,8 +407,8 @@ async fn test_p2p_fetcher_backfill_certs(from_snapshot: bool, version: ProtocolV let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); let rng = &mut ctx.rng(); let setup = Setup::new(rng, 1); - let validator_cfg = new_configs(rng, &setup, 0)[0].clone(); - let node_cfg = new_fullnode(rng, &validator_cfg); + let validator_cfg = testonly::new_configs(rng, &setup, 0)[0].clone(); + let node_cfg = validator_cfg.new_fullnode(rng); scope::run!(ctx, |ctx, s| async { tracing::info!("Spawn validator."); @@ -488,8 +416,12 @@ async fn test_p2p_fetcher_backfill_certs(from_snapshot: bool, version: ProtocolV let (mut validator, runner) = testonly::StateKeeper::new(ctx, validator_pool.clone()).await?; s.spawn_bg(runner.run(ctx)); - let (cfg, secrets) = testonly::config(&validator_cfg); - s.spawn_bg(run_main_node(ctx, cfg, secrets, validator_pool.clone())); + s.spawn_bg(run_main_node( + ctx, + validator_cfg.config.clone(), + validator_cfg.secrets.clone(), + validator_pool.clone(), + )); // API server needs at least 1 L1 batch to start. 
validator.seal_batch().await; let client = validator.connect(ctx).await?; @@ -500,7 +432,7 @@ async fn test_p2p_fetcher_backfill_certs(from_snapshot: bool, version: ProtocolV scope::run!(ctx, |ctx, s| async { let (node, runner) = testonly::StateKeeper::new(ctx, node_pool.clone()).await?; s.spawn_bg(runner.run(ctx)); - s.spawn_bg(node.run_consensus(ctx, client.clone(), &node_cfg)); + s.spawn_bg(node.run_consensus(ctx, client.clone(), node_cfg.clone())); validator.push_random_blocks(rng, 3).await; node_pool .wait_for_block_certificate(ctx, validator.last_block()) @@ -528,7 +460,7 @@ async fn test_p2p_fetcher_backfill_certs(from_snapshot: bool, version: ProtocolV scope::run!(ctx, |ctx, s| async { let (node, runner) = testonly::StateKeeper::new(ctx, node_pool.clone()).await?; s.spawn_bg(runner.run(ctx)); - s.spawn_bg(node.run_consensus(ctx, client.clone(), &node_cfg)); + s.spawn_bg(node.run_consensus(ctx, client.clone(), node_cfg)); validator.push_random_blocks(rng, 3).await; let want = validator_pool .wait_for_block_certificates_and_verify(ctx, validator.last_block()) @@ -554,8 +486,8 @@ async fn test_with_pruning(version: ProtocolVersionId) { let ctx = &ctx::test_root(&ctx::RealClock); let rng = &mut ctx.rng(); let setup = Setup::new(rng, 1); - let validator_cfg = new_configs(rng, &setup, 0)[0].clone(); - let node_cfg = new_fullnode(rng, &validator_cfg); + let validator_cfg = testonly::new_configs(rng, &setup, 0)[0].clone(); + let node_cfg = validator_cfg.new_fullnode(rng); scope::run!(ctx, |ctx, s| async { let validator_pool = ConnectionPool::test(false, version).await; @@ -569,16 +501,20 @@ async fn test_with_pruning(version: ProtocolVersionId) { .context("validator") }); tracing::info!("Run validator."); - let (cfg, secrets) = testonly::config(&validator_cfg); s.spawn_bg({ let validator_pool = validator_pool.clone(); async { - run_main_node(ctx, cfg, secrets, validator_pool) - .await - .context("run_main_node()") + run_main_node( + ctx, + validator_cfg.config.clone(), + validator_cfg.secrets.clone(), + validator_pool, + ) + .await + .context("run_main_node()") } }); - // TODO: ensure at least L1 batch in `testonly::StateKeeper::new()` to make it fool proof. + // TODO: ensure at least 1 L1 batch in `testonly::StateKeeper::new()` to make it fool proof. validator.seal_batch().await; tracing::info!("Run node."); @@ -593,7 +529,7 @@ async fn test_with_pruning(version: ProtocolVersionId) { }); let conn = validator.connect(ctx).await?; s.spawn_bg(async { - node.run_consensus(ctx, conn, &node_cfg) + node.run_consensus(ctx, conn, node_cfg) .await .context("run_consensus()") }); @@ -678,123 +614,3 @@ async fn test_centralized_fetcher(from_snapshot: bool, version: ProtocolVersionI .await .unwrap(); } - -#[test_casing(2, VERSIONS)] -#[tokio::test] -async fn test_attestation_status_api(version: ProtocolVersionId) { - zksync_concurrency::testonly::abort_on_panic(); - let ctx = &ctx::test_root(&ctx::RealClock); - let rng = &mut ctx.rng(); - - scope::run!(ctx, |ctx, s| async { - let pool = ConnectionPool::test(false, version).await; - let (mut sk, runner) = testonly::StateKeeper::new(ctx, pool.clone()).await?; - s.spawn_bg(runner.run(ctx).instrument(tracing::info_span!("validator"))); - - // Setup nontrivial genesis. 
-        while sk.last_sealed_batch() < L1BatchNumber(3) {
-            sk.push_random_blocks(rng, 10).await;
-        }
-        let mut setup = SetupSpec::new(rng, 3);
-        setup.first_block = sk.last_block();
-        let first_batch = sk.last_batch();
-        let setup = Setup::from(setup);
-        let mut conn = pool.connection(ctx).await.wrap("connection()")?;
-        conn.try_update_genesis(ctx, &setup.genesis)
-            .await
-            .wrap("try_update_genesis()")?;
-        // Make sure that the first_batch is actually sealed.
-        sk.seal_batch().await;
-        pool.wait_for_batch(ctx, first_batch).await?;
-
-        // Connect to API endpoint.
-        let api = sk.connect(ctx).await?;
-        let fetch_status = || async {
-            let s = api
-                .fetch_attestation_status()
-                .await?
-                .context("no attestation_status")?;
-            let s: AttestationStatus =
-                zksync_protobuf::serde::deserialize(&s.0).context("deserialize()")?;
-            anyhow::ensure!(s.genesis == setup.genesis.hash(), "genesis hash mismatch");
-            Ok(s)
-        };
-
-        // If the main node has no L1 batch certificates,
-        // then the first one to sign should be the batch with the `genesis.first_block`.
-        let status = fetch_status().await?;
-        assert_eq!(
-            status.next_batch_to_attest,
-            attester::BatchNumber(first_batch.0.into())
-        );
-
-        // Insert a (fake) cert, then check again.
-        {
-            let mut conn = pool.connection(ctx).await?;
-            let number = status.next_batch_to_attest;
-            let hash = conn.batch_hash(ctx, number).await?.unwrap();
-            let genesis = conn.genesis(ctx).await?.unwrap().hash();
-            let cert = attester::BatchQC {
-                signatures: attester::MultiSig::default(),
-                message: attester::Batch {
-                    number,
-                    hash,
-                    genesis,
-                },
-            };
-            conn.insert_batch_certificate(ctx, &cert)
-                .await
-                .context("insert_batch_certificate()")?;
-        }
-        let want = status.next_batch_to_attest.next();
-        let got = fetch_status().await?;
-        assert_eq!(want, got.next_batch_to_attest);
-
-        Ok(())
-    })
-    .await
-    .unwrap();
-}
-
-/// Tests that generated L1 batch witnesses can be verified successfully.
-/// TODO: add tests for verification failures.
-#[test_casing(2, VERSIONS)]
-#[tokio::test]
-async fn test_batch_witness(version: ProtocolVersionId) {
-    zksync_concurrency::testonly::abort_on_panic();
-    let ctx = &ctx::test_root(&ctx::RealClock);
-    let rng = &mut ctx.rng();
-
-    scope::run!(ctx, |ctx, s| async {
-        let pool = ConnectionPool::from_genesis(version).await;
-        let (mut node, runner) = testonly::StateKeeper::new(ctx, pool.clone()).await?;
-        s.spawn_bg(runner.run_real(ctx));
-
-        tracing::info!("analyzing storage");
-        {
-            let mut conn = pool.connection(ctx).await.unwrap();
-            let mut n = validator::BlockNumber(0);
-            while let Some(p) = conn.payload(ctx, n).await? {
-                tracing::info!("block[{n}] = {p:?}");
-                n = n + 1;
-            }
-        }
-
-        // Seal a bunch of batches.
-        node.push_random_blocks(rng, 10).await;
-        node.seal_batch().await;
-        pool.wait_for_batch(ctx, node.last_sealed_batch()).await?;
-        // We can verify only 2nd batch onward, because
-        // batch witness verifies parent of the last block of the
-        // previous batch (and 0th batch contains only 1 block).
-        for n in 2..=node.last_sealed_batch().0 {
-            let n = L1BatchNumber(n);
-            let batch_with_witness = node.load_batch_with_witness(ctx, n).await?;
-            let commit = node.load_batch_commit(ctx, n).await?;
-            batch_with_witness.verify(&commit)?;
-        }
-        Ok(())
-    })
-    .await
-    .unwrap();
-}
diff --git a/prover/Cargo.lock b/prover/Cargo.lock
index 582f15637b5..5ac79d1dd0f 100644
--- a/prover/Cargo.lock
+++ b/prover/Cargo.lock
@@ -7570,9 +7570,9 @@ dependencies = [
 
 [[package]]
 name = "zksync_concurrency"
-version = "0.1.0-rc.10"
+version = "0.1.0-rc.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a463106f37cfa589896e6a165b5bb0533013377990e19f10e8c4894346a62e8b"
+checksum = "b0e31a9fc9a390b440cd12bbe040330dc64f64697a8a8ecbc3beb98cd0747909"
 dependencies = [
  "anyhow",
  "once_cell",
@@ -7606,9 +7606,9 @@ dependencies = [
 
 [[package]]
 name = "zksync_consensus_crypto"
-version = "0.1.0-rc.10"
+version = "0.1.0-rc.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3f0883af373e9198fd27c0148e7e47b37f912cb4b444bec3f7eed0af0b0dfc69"
+checksum = "efb7ff3ec44b7b92fd4e28d9d92b83d61dc74125ccfc90bcfb27a5750d8a8580"
 dependencies = [
  "anyhow",
  "blst",
@@ -7630,9 +7630,9 @@ dependencies = [
 
 [[package]]
 name = "zksync_consensus_roles"
-version = "0.1.0-rc.10"
+version = "0.1.0-rc.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e426aa7c68a12dde702c3ec4ef49de24d9054ef908384232b7887e043ca3f2fe"
+checksum = "72223c0b20621775db51bcc4b043addafeaf784d444af2ad4bc8bcdee477367c"
 dependencies = [
  "anyhow",
  "bit-vec",
@@ -7652,9 +7652,9 @@ dependencies = [
 
 [[package]]
 name = "zksync_consensus_storage"
-version = "0.1.0-rc.10"
+version = "0.1.0-rc.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8388c33fd5bc3725e58c26db2d3016538c6221c6448b3e92cf5df07f6074a028"
+checksum = "41d1750ad93f7e3a0c2f5880f9bcc1244a3b46d3e6c124c4f65f545032b87464"
 dependencies = [
  "anyhow",
  "async-trait",
@@ -7672,9 +7672,9 @@ dependencies = [
 
 [[package]]
 name = "zksync_consensus_utils"
-version = "0.1.0-rc.10"
+version = "0.1.0-rc.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "612920e56dcb99f227bc23e1254f4dabc7cb4c5cd1a9ec400ceba0ec6fa77c1e"
+checksum = "2ff679f8b5f671d887a750b8107f3b5c01fd6085f68eef37ab01de8d2bd0736b"
 dependencies = [
  "anyhow",
  "rand 0.8.5",
@@ -7984,9 +7984,9 @@ dependencies = [
 
 [[package]]
 name = "zksync_protobuf"
-version = "0.1.0-rc.10"
+version = "0.1.0-rc.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b0d82fd63f27681b9c01f0e01e3060e71b72809db8e21d9130663ee92bd1e391"
+checksum = "f4f6ba3bf0aac20de18b4ae18a22d8c81b83f8f72e8fdec1c879525ecdacd2f5"
 dependencies = [
  "anyhow",
  "bit-vec",
@@ -8005,9 +8005,9 @@ dependencies = [
 
 [[package]]
 name = "zksync_protobuf_build"
-version = "0.1.0-rc.10"
+version = "0.1.0-rc.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ee3c158ab4d211053886371d4a00514bdf8ebdf826d40ee03b98fee2e0d1605e"
+checksum = "7798c248b9a64505f0586bd5fadad6b26c999be4a8dec6b1a86b10b3888169c5"
 dependencies = [
  "anyhow",
  "heck 0.5.0",
diff --git a/zk_toolbox/Cargo.lock b/zk_toolbox/Cargo.lock
index 04a29f5b0f4..41b972a4cef 100644
--- a/zk_toolbox/Cargo.lock
+++ b/zk_toolbox/Cargo.lock
@@ -6337,9 +6337,9 @@ dependencies = [
 
 [[package]]
 name = "zksync_concurrency"
-version = "0.1.0-rc.10"
+version = "0.1.0-rc.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a463106f37cfa589896e6a165b5bb0533013377990e19f10e8c4894346a62e8b"
+checksum = "b0e31a9fc9a390b440cd12bbe040330dc64f64697a8a8ecbc3beb98cd0747909"
 dependencies = [
  "anyhow",
  "once_cell",
@@ -6371,9 +6371,9 @@ dependencies = [
 
 [[package]]
 name = "zksync_consensus_utils"
-version = "0.1.0-rc.10"
+version = "0.1.0-rc.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "612920e56dcb99f227bc23e1254f4dabc7cb4c5cd1a9ec400ceba0ec6fa77c1e"
+checksum = "2ff679f8b5f671d887a750b8107f3b5c01fd6085f68eef37ab01de8d2bd0736b"
 dependencies = [
  "anyhow",
  "rand",
@@ -6422,9 +6422,9 @@ dependencies = [
 
 [[package]]
 name = "zksync_protobuf"
-version = "0.1.0-rc.10"
+version = "0.1.0-rc.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b0d82fd63f27681b9c01f0e01e3060e71b72809db8e21d9130663ee92bd1e391"
+checksum = "f4f6ba3bf0aac20de18b4ae18a22d8c81b83f8f72e8fdec1c879525ecdacd2f5"
 dependencies = [
  "anyhow",
  "bit-vec",
@@ -6443,9 +6443,9 @@ dependencies = [
 
 [[package]]
 name = "zksync_protobuf_build"
-version = "0.1.0-rc.10"
+version = "0.1.0-rc.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ee3c158ab4d211053886371d4a00514bdf8ebdf826d40ee03b98fee2e0d1605e"
+checksum = "7798c248b9a64505f0586bd5fadad6b26c999be4a8dec6b1a86b10b3888169c5"
 dependencies = [
  "anyhow",
  "heck",
diff --git a/zk_toolbox/Cargo.toml b/zk_toolbox/Cargo.toml
index ab850d82770..ef2aed7c99c 100644
--- a/zk_toolbox/Cargo.toml
+++ b/zk_toolbox/Cargo.toml
@@ -30,7 +30,7 @@ types = { path = "crates/types" }
 zksync_config = { path = "../core/lib/config" }
 zksync_protobuf_config = { path = "../core/lib/protobuf_config" }
 zksync_basic_types = { path = "../core/lib/basic_types" }
-zksync_protobuf = "=0.1.0-rc.10"
+zksync_protobuf = "=0.1.0-rc.11"
 
 # External dependencies
 anyhow = "1.0.82"