From 05de258ad636c216aea4b5129ae78580182654b8 Mon Sep 17 00:00:00 2001
From: Grzegorz Prusak
Date: Thu, 22 Feb 2024 12:50:04 +0100
Subject: [PATCH] applied comments

---
 node/actors/bft/src/leader/tests.rs           |   1 +
 node/actors/bft/src/replica/leader_prepare.rs |   2 +-
 node/actors/executor/src/tests.rs             | 107 +-----------------
 node/actors/network/src/mux/tests/mod.rs      |   4 -
 node/actors/sync_blocks/src/peers/mod.rs      |   3 +-
 .../actors/sync_blocks/src/peers/tests/mod.rs |   2 +-
 node/libs/roles/src/validator/conv.rs         |   6 +-
 .../src/validator/messages/leader_commit.rs   |   9 +-
 .../src/validator/messages/leader_prepare.rs  |  12 +-
 .../src/validator/messages/replica_prepare.rs |   6 +-
 node/libs/roles/src/validator/testonly.rs     |   9 +-
 node/libs/storage/src/block_store/mod.rs      |  32 +++---
 node/libs/storage/src/testonly/in_memory.rs   |  14 ---
 node/tools/src/proto/mod.proto                |  21 ++--
 14 files changed, 46 insertions(+), 182 deletions(-)

diff --git a/node/actors/bft/src/leader/tests.rs b/node/actors/bft/src/leader/tests.rs
index dde39e3a..b74e4411 100644
--- a/node/actors/bft/src/leader/tests.rs
+++ b/node/actors/bft/src/leader/tests.rs
@@ -29,6 +29,7 @@ async fn replica_prepare_sanity_yield_leader_prepare() {
         let (mut util, runner) = UTHarness::new(ctx, 1).await;
         s.spawn_bg(runner.run(ctx));
 
+        util.produce_block(ctx).await;
         let replica_prepare = util.new_replica_prepare();
         let leader_prepare = util
             .process_replica_prepare(ctx, util.sign(replica_prepare.clone()))
diff --git a/node/actors/bft/src/replica/leader_prepare.rs b/node/actors/bft/src/replica/leader_prepare.rs
index 5c6db1ae..7e017bcb 100644
--- a/node/actors/bft/src/replica/leader_prepare.rs
+++ b/node/actors/bft/src/replica/leader_prepare.rs
@@ -110,7 +110,7 @@ impl StateMachine {
             });
         }
 
-        // ----------- Checking the the message --------------
+        // ----------- Checking the message --------------
 
         signed_message.verify().map_err(Error::InvalidSignature)?;
         message
diff --git a/node/actors/executor/src/tests.rs b/node/actors/executor/src/tests.rs
index 8fa5ba51..f95b6641 100644
--- a/node/actors/executor/src/tests.rs
+++ b/node/actors/executor/src/tests.rs
@@ -1,18 +1,12 @@
 //! High-level tests for `Executor`.
-
 use super::*;
-use tracing::Instrument as _;
-use zksync_concurrency::{
-    testonly::{abort_on_panic, set_timeout},
-    time,
-};
+use zksync_concurrency::testonly::abort_on_panic;
 use zksync_consensus_bft as bft;
 use zksync_consensus_network::testonly::{new_configs, new_fullnode};
 use zksync_consensus_roles::validator::{testonly::Setup, BlockNumber};
 use zksync_consensus_storage::{
-    self as storage,
     testonly::{in_memory, new_store},
-    BlockStore, PersistentBlockStore as _,
+    BlockStore,
 };
 
 fn make_executor(cfg: &network::Config, block_store: Arc<BlockStore>) -> Executor {
@@ -80,100 +74,3 @@ async fn executing_validator_and_full_node() {
     .await
     .unwrap();
 }
-
-/*
-/// * finalize some blocks
-/// * revert bunch of blocks
-/// * restart validators and make sure that new blocks get produced
-/// * start additional full node to make sure that it can sync blocks from before the fork
-#[tokio::test]
-async fn test_block_revert() {
-    abort_on_panic();
-    let _guard = set_timeout(time::Duration::seconds(10));
-
-    let ctx = &ctx::test_root(&ctx::AffineClock::new(10.));
-    let rng = &mut ctx.rng();
-
-    let setup = Setup::new(rng, 2);
-    let first = setup.next();
-    let cfgs = new_configs(rng, &setup, 1);
-    // Persistent stores for the validators.
-    let mut persistent_stores: Vec<_> = cfgs
-        .iter()
-        .map(|_| in_memory::BlockStore::new(setup.genesis.clone()))
-        .collect();
-
-    // Make validators produce some blocks.
-    scope::run!(ctx, |ctx, s| async {
-        let mut stores = vec![];
-        for i in 0..cfgs.len() {
-            let (store, runner) = BlockStore::new(ctx, Box::new(persistent_stores[i].clone()))
-                .await
-                .unwrap();
-            s.spawn_bg(runner.run(ctx));
-            s.spawn_bg(make_executor(&cfgs[i], store.clone()).run(ctx));
-            stores.push(store);
-        }
-        for s in stores {
-            s.wait_until_persisted(ctx, BlockNumber(first.0 + 6))
-                .await?;
-        }
-        Ok(())
-    })
-    .await
-    .unwrap();
-
-    tracing::info!("Revert blocks");
-    let first = BlockNumber(first.0 + 3);
-    let fork = validator::Fork {
-        number: setup.genesis.fork.number.next(),
-        first_block: first,
-        first_parent: persistent_stores[0]
-            .block(ctx, first)
-            .await
-            .unwrap()
-            .header()
-            .parent,
-    };
-    let mut genesis = setup.genesis.clone();
-    genesis.fork = fork.clone();
-    // Update configs and persistent storage.
-    for store in &mut persistent_stores {
-        *store = store.fork(fork.clone()).unwrap();
-    }
-
-    let last_block = BlockNumber(first.0 + 8);
-    scope::run!(ctx, |ctx, s| async {
-        tracing::info!("Make validators produce blocks on the new fork.");
-        let mut stores = vec![];
-        for i in 0..cfgs.len() {
-            let (store, runner) = BlockStore::new(ctx, Box::new(persistent_stores[i].clone()))
-                .await
-                .unwrap();
-            s.spawn_bg(runner.run(ctx).instrument(tracing::info_span!("node", i)));
-            s.spawn_bg(
-                make_executor(&cfgs[i], store.clone())
-                    .run(ctx)
-                    .instrument(tracing::info_span!("node", i)),
-            );
-            stores.push(store);
-        }
-
-        tracing::info!("Spawn a new node with should fetch blocks from both new and old fork");
-        let (store, runner) = new_store(ctx, &genesis).await;
-        s.spawn_bg(runner.run(ctx).instrument(tracing::info_span!("fullnode")));
-        s.spawn_bg(
-            make_executor(&new_fullnode(rng, &cfgs[0]), store.clone())
-                .run(ctx)
-                .instrument(tracing::info_span!("fullnode")),
-        );
-        store.wait_until_persisted(ctx, last_block).await?;
-        storage::testonly::verify(ctx, &store)
-            .await
-            .context("verify(storage)")?;
-        Ok(())
-    })
-    .await
-    .unwrap();
-}
-*/
diff --git a/node/actors/network/src/mux/tests/mod.rs b/node/actors/network/src/mux/tests/mod.rs
index 1d6039e3..0c43d0ea 100644
--- a/node/actors/network/src/mux/tests/mod.rs
+++ b/node/actors/network/src/mux/tests/mod.rs
@@ -192,10 +192,6 @@ fn expected(res: Result<(), mux::RunError>) -> Result<(), mux::RunError> {
 // * multiple capabilities are used at the same time.
 // * ends use totally different configs
 // * messages are larger than frames
-//
-// TODO(gprusak): in case the test fails it may be hard to find the actual bug, because
-// this test covers a lot of features. In such situation more specific tests
-// checking 1 property at a time should be added.
 #[test]
 fn mux_with_noise() {
     abort_on_panic();
diff --git a/node/actors/sync_blocks/src/peers/mod.rs b/node/actors/sync_blocks/src/peers/mod.rs
index 53308995..f57ea8bd 100644
--- a/node/actors/sync_blocks/src/peers/mod.rs
+++ b/node/actors/sync_blocks/src/peers/mod.rs
@@ -73,8 +73,7 @@ impl PeerStates {
         let Some(last) = &state.last else {
            return Ok(());
         };
-        last.verify(self.genesis())
-            .context("state.last.verify()")?;
+        last.verify(self.genesis()).context("state.last.verify()")?;
         let mut peers = self.peers.lock().unwrap();
         match peers.entry(peer.clone()) {
             Entry::Occupied(mut e) => e.get_mut().state = state.clone(),
diff --git a/node/actors/sync_blocks/src/peers/tests/mod.rs b/node/actors/sync_blocks/src/peers/tests/mod.rs
index 84176ea6..d6c9d66d 100644
--- a/node/actors/sync_blocks/src/peers/tests/mod.rs
+++ b/node/actors/sync_blocks/src/peers/tests/mod.rs
@@ -69,7 +69,7 @@ async fn test_peer_states<T: Test>(test: T) {
     let ctx = &ctx::test_root(&clock);
     let rng = &mut ctx.rng();
     let mut setup = validator::testonly::Setup::new(rng, 4);
-    setup.push_blocks(rng,T::BLOCK_COUNT);
+    setup.push_blocks(rng, T::BLOCK_COUNT);
     let (store, store_run) = new_store(ctx, &setup.genesis).await;
     test.initialize_storage(ctx, store.as_ref(), &setup).await;
 
diff --git a/node/libs/roles/src/validator/conv.rs b/node/libs/roles/src/validator/conv.rs
index 95edb711..04886834 100644
--- a/node/libs/roles/src/validator/conv.rs
+++ b/node/libs/roles/src/validator/conv.rs
@@ -1,8 +1,8 @@
 use super::{
     AggregateSignature, BlockHeader, BlockHeaderHash, BlockNumber, CommitQC, ConsensusMsg,
-    FinalBlock, Fork, ForkNumber, Genesis, GenesisHash, LeaderCommit, LeaderPrepare, Msg,
-    MsgHash, NetAddress, Payload, PayloadHash, Phase, PrepareQC, ProtocolVersion, PublicKey,
-    ReplicaCommit, ReplicaPrepare, Signature, Signed, Signers, ValidatorSet, View, ViewNumber,
+    FinalBlock, Fork, ForkNumber, Genesis, GenesisHash, LeaderCommit, LeaderPrepare, Msg, MsgHash,
+    NetAddress, Payload, PayloadHash, Phase, PrepareQC, ProtocolVersion, PublicKey, ReplicaCommit,
+    ReplicaPrepare, Signature, Signed, Signers, ValidatorSet, View, ViewNumber,
 };
 use crate::{node::SessionId, proto::validator as proto};
 use anyhow::Context as _;
diff --git a/node/libs/roles/src/validator/messages/leader_commit.rs b/node/libs/roles/src/validator/messages/leader_commit.rs
index 9df90354..f9eb7aa9 100644
--- a/node/libs/roles/src/validator/messages/leader_commit.rs
+++ b/node/libs/roles/src/validator/messages/leader_commit.rs
@@ -11,8 +11,7 @@ pub struct LeaderCommit {
 impl LeaderCommit {
     /// Verifies LeaderCommit.
     pub fn verify(&self, genesis: &Genesis) -> Result<(), CommitQCVerifyError> {
-        self.justification
-            .verify(genesis)
+        self.justification.verify(genesis)
     }
 
     /// View of this message.
@@ -76,6 +75,7 @@ impl CommitQC {
     }
 
     /// Add a validator's signature.
+    /// Signature is assumed to be already verified.
     pub fn add(&mut self, msg: &Signed<ReplicaCommit>, genesis: &Genesis) {
         if self.message != msg.msg {
             return;
@@ -91,10 +91,7 @@ impl CommitQC {
     }
 
     /// Verifies the signature of the CommitQC.
-    pub fn verify(
-        &self,
-        genesis: &Genesis,
-    ) -> Result<(), CommitQCVerifyError> {
+    pub fn verify(&self, genesis: &Genesis) -> Result<(), CommitQCVerifyError> {
         use CommitQCVerifyError as Error;
         self.message
             .verify(genesis)
diff --git a/node/libs/roles/src/validator/messages/leader_prepare.rs b/node/libs/roles/src/validator/messages/leader_prepare.rs
index 9288a26c..15580bb3 100644
--- a/node/libs/roles/src/validator/messages/leader_prepare.rs
+++ b/node/libs/roles/src/validator/messages/leader_prepare.rs
@@ -76,11 +76,10 @@ impl PrepareQC {
     }
 
     /// Add a validator's signed message.
-    /// * `signed_message` - A valid signed `ReplicaPrepare` message.
-    /// * `validator_index` - The signer index in the validator set.
+    /// Message is assumed to be already verified.
+    // TODO: check if there is already a message from that validator.
+    // TODO: verify the message inside instead.
     pub fn add(&mut self, msg: &Signed<ReplicaPrepare>, genesis: &Genesis) {
-        // TODO: check if there is already a message from that validator.
-        // TODO: verify msg
         if msg.msg.view != self.view {
             return;
         }
@@ -234,10 +233,7 @@ impl LeaderPrepare {
         }
         let (want_parent, want_number) = match high_qc {
             Some(qc) => (Some(qc.header().hash()), qc.header().number.next()),
-            None => (
-                genesis.fork.first_parent,
-                genesis.fork.first_block,
-            ),
+            None => (genesis.fork.first_parent, genesis.fork.first_block),
         };
         if self.proposal.parent != want_parent {
             return Err(Error::BadParentHash {
diff --git a/node/libs/roles/src/validator/messages/replica_prepare.rs b/node/libs/roles/src/validator/messages/replica_prepare.rs
index 8394b591..830c1ac0 100644
--- a/node/libs/roles/src/validator/messages/replica_prepare.rs
+++ b/node/libs/roles/src/validator/messages/replica_prepare.rs
@@ -50,15 +50,13 @@ impl ReplicaPrepare {
             if self.view.number <= v.view.number {
                 return Err(Error::HighVoteFutureView);
             }
-            v.verify(genesis)
-                .map_err(Error::HighVote)?;
+            v.verify(genesis).map_err(Error::HighVote)?;
         }
         if let Some(qc) = &self.high_qc {
             if self.view.number <= qc.view().number {
                 return Err(Error::HighQCFutureView);
             }
-            qc.verify(genesis)
-                .map_err(Error::HighQC)?;
+            qc.verify(genesis).map_err(Error::HighQC)?;
         }
         Ok(())
     }
diff --git a/node/libs/roles/src/validator/testonly.rs b/node/libs/roles/src/validator/testonly.rs
index e5828a3d..27290f29 100644
--- a/node/libs/roles/src/validator/testonly.rs
+++ b/node/libs/roles/src/validator/testonly.rs
@@ -1,10 +1,9 @@
 //! Test-only utilities.
 use super::{
     AggregateSignature, BlockHeader, BlockHeaderHash, BlockNumber, CommitQC, ConsensusMsg,
-    FinalBlock, Fork, ForkNumber, Genesis, GenesisHash, LeaderCommit, LeaderPrepare, Msg,
-    MsgHash, NetAddress, Payload, PayloadHash, Phase, PrepareQC, ProtocolVersion, PublicKey,
-    ReplicaCommit, ReplicaPrepare, SecretKey, Signature, Signed, Signers, ValidatorSet, View,
-    ViewNumber,
+    FinalBlock, Fork, ForkNumber, Genesis, GenesisHash, LeaderCommit, LeaderPrepare, Msg, MsgHash,
+    NetAddress, Payload, PayloadHash, Phase, PrepareQC, ProtocolVersion, PublicKey, ReplicaCommit,
+    ReplicaPrepare, SecretKey, Signature, Signed, Signers, ValidatorSet, View, ViewNumber,
 };
 use bit_vec::BitVec;
 use rand::{
@@ -41,7 +40,7 @@ impl Setup {
             first_block: BlockNumber(rng.gen_range(0..100)),
             first_parent: Some(rng.gen()),
         };
-        Self::new_with_fork(rng,validators,fork)
+        Self::new_with_fork(rng, validators, fork)
     }
 
     /// Next block to finalize.
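The leader_commit.rs and leader_prepare.rs hunks above document a contract: `CommitQC::add` and `PrepareQC::add` assume the signed vote was already verified, and they silently ignore mismatching input rather than returning an error. A minimal sketch of the resulting call pattern, using the `Signed::verify` and `CommitQC::add` signatures visible in this patch; the `aggregate_vote` helper itself is hypothetical and not part of the patch:

    use anyhow::Context as _;
    use zksync_consensus_roles::validator::{CommitQC, Genesis, ReplicaCommit, Signed};

    // Hypothetical caller-side aggregation step: verify first, then add,
    // since `add` does not re-check the signature and silently drops
    // votes for a different message.
    fn aggregate_vote(
        qc: &mut CommitQC,
        vote: &Signed<ReplicaCommit>,
        genesis: &Genesis,
    ) -> anyhow::Result<()> {
        // Signature verification happens here, not inside `add`.
        vote.verify().context("vote.verify()")?;
        qc.add(vote, genesis);
        Ok(())
    }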
diff --git a/node/libs/storage/src/block_store/mod.rs b/node/libs/storage/src/block_store/mod.rs
index fd72deec..456bc2e1 100644
--- a/node/libs/storage/src/block_store/mod.rs
+++ b/node/libs/storage/src/block_store/mod.rs
@@ -149,8 +149,7 @@ impl BlockStore {
         let last = persistent.last(ctx).await.wrap("persistent.last()")?;
         t.observe();
         if let Some(last) = &last {
-            last.verify(&genesis)
-                .context("last.verify()")?;
+            last.verify(&genesis).context("last.verify()")?;
         }
         let state = BlockStoreState {
             first: genesis.fork.first_block,
@@ -168,7 +167,9 @@ impl BlockStore {
         });
         // Verify the first block.
         if let Some(block) = this.block(ctx, this.genesis.fork.first_block).await? {
-            block.verify(&this.genesis).with_context(|| format!("verify({:?})", this.genesis.fork.first_block))?;
+            block
+                .verify(&this.genesis)
+                .with_context(|| format!("verify({:?})", this.genesis.fork.first_block))?;
         }
         Ok((this.clone(), BlockStoreRunner(this)))
     }
@@ -217,25 +218,26 @@ impl BlockStore {
         ctx: &ctx::Ctx,
         block: validator::FinalBlock,
     ) -> ctx::Result<()> {
-        let number = block.header().number;
+        let number = block.number();
         {
             let sub = &mut self.subscribe();
             let queued_state =
                 sync::wait_for(ctx, sub, |queued_state| queued_state.next() >= number).await?;
-            if queued_state.next() != number {
+            if queued_state.next() > number {
                 return Ok(());
             }
-            if queued_state.last.as_ref().map(|qc| qc.header().hash()) != block.header().parent {
-                return Err(
-                    anyhow::format_err!("block.parent doesn't match the previous block").into(),
-                );
+            block.verify(&self.genesis).context("block.verify()")?;
+            // Verify parent hash, if previous block is available.
+            if let Some(last) = queued_state.last.as_ref() {
+                if Some(last.header().hash()) != block.header().parent {
+                    return Err(anyhow::format_err!(
+                        "block.parent = {:?}, want {:?}",
+                        block.header().parent,
+                        last.header().hash()
+                    )
+                    .into());
+                }
             }
-            // Verify the message without verifying the signatures, to avoid doing it twice.
-            // TODO(gprusak): consider moving the signature verification here for resiliency.
-            block
-                .justification
-                .message
-                .verify(&self.genesis)?;
         }
         self.inner.send_if_modified(|inner| {
             let modified = inner.queued_state.send_if_modified(|queued_state| {
diff --git a/node/libs/storage/src/testonly/in_memory.rs b/node/libs/storage/src/testonly/in_memory.rs
index ffdf1a66..209d046c 100644
--- a/node/libs/storage/src/testonly/in_memory.rs
+++ b/node/libs/storage/src/testonly/in_memory.rs
@@ -30,20 +30,6 @@ impl BlockStore {
             blocks: Mutex::default(),
         }))
     }
-
-    /*/// Forks the storage.
-    pub fn fork(&self, fork: validator::Fork) -> anyhow::Result<Self> {
-        let mut genesis = self.0.genesis.clone();
-        genesis.forks.push(fork)?;
-        let mut blocks = self.0.blocks.lock().unwrap().clone();
-        if let Some(first) = blocks.front().map(|b| b.header().number.0) {
-            blocks.truncate(genesis.forks.current().first_block.0.saturating_sub(first) as usize);
-        }
-        Ok(Self(Arc::new(BlockStoreInner {
-            genesis,
-            blocks: Mutex::new(blocks),
-        })))
-    }*/
 }
 
 #[async_trait::async_trait]
diff --git a/node/tools/src/proto/mod.proto b/node/tools/src/proto/mod.proto
index 47badb76..5be829df 100644
--- a/node/tools/src/proto/mod.proto
+++ b/node/tools/src/proto/mod.proto
@@ -27,13 +27,6 @@
 // NodePublicKey - public key of the node (gossip network participant) of the form "node:public:<signature scheme>:<hex encoded key material>"
 // Currently only ed25519 signature scheme is supported for nodes.
 // example: "node:public:ed25519:d36607699a0a3fbe3de16947928cf299484219ff62ca20f387795b0859dbe501"
-//
-// FinalBlock - hex encoded serialized roles.validator.FinalBlock.
-// Used to specify the genesis block of the chain.
-// This blob of data is not customizable and we don't want to impose
-// json-level backward compatibility on anything else but the configs.
-// TODO(gprusak): either don't include it at all (derive genesis purely from
-// the validator set) or move it to a separate config file.
 syntax = "proto3";
 
 package zksync.tools;
 
 import "zksync/roles/validator.proto";
 
 // (public key, ip address) of a gossip network node.
 message NodeAddr {
-  optional string key = 1; // [required] NodePublicKey
-  optional string addr = 2; // [required] IpAddr
+  optional string key = 1; // required; NodePublicKey
+  optional string addr = 2; // required; IpAddr
 }
 
 // Application configuration.
 message AppConfig {
 
   // IP:port to listen on, for incoming TCP connections.
   // Use `0.0.0.0:<port>` to listen on all network interfaces (i.e. on all IPs exposed by this VM).
-  optional string server_addr = 1; // [required] IpAddr
+  optional string server_addr = 1; // required; IpAddr
 
   // Public IP:port to advertise, should forward to server_addr.
-  optional string public_addr = 2; // [required] IpAddr
+  optional string public_addr = 2; // required; IpAddr
 
   // IP:port to serve metrics data for scraping.
   // Use `0.0.0.0:<port>` to listen on all network interfaces.
   // If not set, metrics data won't be served.
-  optional string metrics_server_addr = 3; // [optional] IpAddr
+  optional string metrics_server_addr = 3; // optional; IpAddr
 
   // Consensus
 
   // Specifies the genesis block of the chain.
   optional roles.validator.Genesis genesis = 4; // required
 
   // Maximal size of the block payload.
-  optional uint64 max_payload_size = 5; // [required] B
+  optional uint64 max_payload_size = 5; // required; bytes
 
   // Gossip network
 
   // Limit on the number of gossip network inbound connections outside
   // of the `gossip_static_inbound` set.
-  optional uint64 gossip_dynamic_inbound_limit = 6; // [required]
+  optional uint64 gossip_dynamic_inbound_limit = 6; // required
 
   // Inbound connections that should be unconditionally accepted on the gossip network.
   repeated string gossip_static_inbound = 7; // NodePublicKey
 
   // Outbound gossip network connections that the node should actively try to