applied comments
pompon0 committed Feb 22, 2024
1 parent 11de1a4 commit 05de258
Showing 14 changed files with 46 additions and 182 deletions.
1 change: 1 addition & 0 deletions node/actors/bft/src/leader/tests.rs
@@ -29,6 +29,7 @@ async fn replica_prepare_sanity_yield_leader_prepare() {
let (mut util, runner) = UTHarness::new(ctx, 1).await;
s.spawn_bg(runner.run(ctx));

util.produce_block(ctx).await;
let replica_prepare = util.new_replica_prepare();
let leader_prepare = util
.process_replica_prepare(ctx, util.sign(replica_prepare.clone()))
2 changes: 1 addition & 1 deletion node/actors/bft/src/replica/leader_prepare.rs
@@ -110,7 +110,7 @@ impl StateMachine {
});
}

// ----------- Checking the the message --------------
// ----------- Checking the message --------------

signed_message.verify().map_err(Error::InvalidSignature)?;
message
107 changes: 2 additions & 105 deletions node/actors/executor/src/tests.rs
@@ -1,18 +1,12 @@
//! High-level tests for `Executor`.

use super::*;
use tracing::Instrument as _;
use zksync_concurrency::{
testonly::{abort_on_panic, set_timeout},
time,
};
use zksync_concurrency::testonly::abort_on_panic;
use zksync_consensus_bft as bft;
use zksync_consensus_network::testonly::{new_configs, new_fullnode};
use zksync_consensus_roles::validator::{testonly::Setup, BlockNumber};
use zksync_consensus_storage::{
self as storage,
testonly::{in_memory, new_store},
BlockStore, PersistentBlockStore as _,
BlockStore,
};

fn make_executor(cfg: &network::Config, block_store: Arc<BlockStore>) -> Executor {
@@ -80,100 +74,3 @@ async fn executing_validator_and_full_node() {
.await
.unwrap();
}

/*
/// * finalize some blocks
/// * revert a bunch of blocks
/// * restart validators and make sure that new blocks get produced
/// * start additional full node to make sure that it can sync blocks from before the fork
#[tokio::test]
async fn test_block_revert() {
abort_on_panic();
let _guard = set_timeout(time::Duration::seconds(10));
let ctx = &ctx::test_root(&ctx::AffineClock::new(10.));
let rng = &mut ctx.rng();
let setup = Setup::new(rng, 2);
let first = setup.next();
let cfgs = new_configs(rng, &setup, 1);
// Persistent stores for the validators.
let mut persistent_stores: Vec<_> = cfgs
.iter()
.map(|_| in_memory::BlockStore::new(setup.genesis.clone()))
.collect();
// Make validators produce some blocks.
scope::run!(ctx, |ctx, s| async {
let mut stores = vec![];
for i in 0..cfgs.len() {
let (store, runner) = BlockStore::new(ctx, Box::new(persistent_stores[i].clone()))
.await
.unwrap();
s.spawn_bg(runner.run(ctx));
s.spawn_bg(make_executor(&cfgs[i], store.clone()).run(ctx));
stores.push(store);
}
for s in stores {
s.wait_until_persisted(ctx, BlockNumber(first.0 + 6))
.await?;
}
Ok(())
})
.await
.unwrap();
tracing::info!("Revert blocks");
let first = BlockNumber(first.0 + 3);
let fork = validator::Fork {
number: setup.genesis.fork.number.next(),
first_block: first,
first_parent: persistent_stores[0]
.block(ctx, first)
.await
.unwrap()
.header()
.parent,
};
let mut genesis = setup.genesis.clone();
genesis.fork = fork.clone();
// Update configs and persistent storage.
for store in &mut persistent_stores {
*store = store.fork(fork.clone()).unwrap();
}
let last_block = BlockNumber(first.0 + 8);
scope::run!(ctx, |ctx, s| async {
tracing::info!("Make validators produce blocks on the new fork.");
let mut stores = vec![];
for i in 0..cfgs.len() {
let (store, runner) = BlockStore::new(ctx, Box::new(persistent_stores[i].clone()))
.await
.unwrap();
s.spawn_bg(runner.run(ctx).instrument(tracing::info_span!("node", i)));
s.spawn_bg(
make_executor(&cfgs[i], store.clone())
.run(ctx)
.instrument(tracing::info_span!("node", i)),
);
stores.push(store);
}
tracing::info!("Spawn a new node with should fetch blocks from both new and old fork");
let (store, runner) = new_store(ctx, &genesis).await;
s.spawn_bg(runner.run(ctx).instrument(tracing::info_span!("fullnode")));
s.spawn_bg(
make_executor(&new_fullnode(rng, &cfgs[0]), store.clone())
.run(ctx)
.instrument(tracing::info_span!("fullnode")),
);
store.wait_until_persisted(ctx, last_block).await?;
storage::testonly::verify(ctx, &store)
.await
.context("verify(storage)")?;
Ok(())
})
.await
.unwrap();
}
*/
4 changes: 0 additions & 4 deletions node/actors/network/src/mux/tests/mod.rs
@@ -192,10 +192,6 @@ fn expected(res: Result<(), mux::RunError>) -> Result<(), mux::RunError> {
// * multiple capabilities are used at the same time.
// * ends use totally different configs
// * messages are larger than frames
//
// TODO(gprusak): in case the test fails it may be hard to find the actual bug, because
// this test covers a lot of features. In such situation more specific tests
// checking 1 property at a time should be added.
#[test]
fn mux_with_noise() {
abort_on_panic();
3 changes: 1 addition & 2 deletions node/actors/sync_blocks/src/peers/mod.rs
@@ -73,8 +73,7 @@ impl PeerStates {
let Some(last) = &state.last else {
return Ok(());
};
last.verify(self.genesis())
.context("state.last.verify()")?;
last.verify(self.genesis()).context("state.last.verify()")?;
let mut peers = self.peers.lock().unwrap();
match peers.entry(peer.clone()) {
Entry::Occupied(mut e) => e.get_mut().state = state.clone(),
2 changes: 1 addition & 1 deletion node/actors/sync_blocks/src/peers/tests/mod.rs
@@ -69,7 +69,7 @@ async fn test_peer_states<T: Test>(test: T) {
let ctx = &ctx::test_root(&clock);
let rng = &mut ctx.rng();
let mut setup = validator::testonly::Setup::new(rng, 4);
setup.push_blocks(rng,T::BLOCK_COUNT);
setup.push_blocks(rng, T::BLOCK_COUNT);
let (store, store_run) = new_store(ctx, &setup.genesis).await;
test.initialize_storage(ctx, store.as_ref(), &setup).await;

6 changes: 3 additions & 3 deletions node/libs/roles/src/validator/conv.rs
@@ -1,8 +1,8 @@
use super::{
AggregateSignature, BlockHeader, BlockHeaderHash, BlockNumber, CommitQC, ConsensusMsg,
FinalBlock, Fork, ForkNumber, Genesis, GenesisHash, LeaderCommit, LeaderPrepare, Msg,
MsgHash, NetAddress, Payload, PayloadHash, Phase, PrepareQC, ProtocolVersion, PublicKey,
ReplicaCommit, ReplicaPrepare, Signature, Signed, Signers, ValidatorSet, View, ViewNumber,
FinalBlock, Fork, ForkNumber, Genesis, GenesisHash, LeaderCommit, LeaderPrepare, Msg, MsgHash,
NetAddress, Payload, PayloadHash, Phase, PrepareQC, ProtocolVersion, PublicKey, ReplicaCommit,
ReplicaPrepare, Signature, Signed, Signers, ValidatorSet, View, ViewNumber,
};
use crate::{node::SessionId, proto::validator as proto};
use anyhow::Context as _;
9 changes: 3 additions & 6 deletions node/libs/roles/src/validator/messages/leader_commit.rs
@@ -11,8 +11,7 @@ pub struct LeaderCommit {
impl LeaderCommit {
/// Verifies LeaderCommit.
pub fn verify(&self, genesis: &Genesis) -> Result<(), CommitQCVerifyError> {
self.justification
.verify(genesis)
self.justification.verify(genesis)
}

/// View of this message.
@@ -76,6 +75,7 @@ impl CommitQC {
}

/// Add a validator's signature.
/// Signature is assumed to be already verified.
pub fn add(&mut self, msg: &Signed<ReplicaCommit>, genesis: &Genesis) {
if self.message != msg.msg {
return;
@@ -91,10 +91,7 @@
}

/// Verifies the signature of the CommitQC.
pub fn verify(
&self,
genesis: &Genesis,
) -> Result<(), CommitQCVerifyError> {
pub fn verify(&self, genesis: &Genesis) -> Result<(), CommitQCVerifyError> {
use CommitQCVerifyError as Error;
self.message
.verify(genesis)
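The added doc line makes the contract explicit: `CommitQC::add` does not verify the signature itself, so the caller must verify before adding. A minimal sketch of that verify-then-add pattern, using hypothetical stand-in types (`QuorumCert`, a `usize` signer index) rather than the crate's real BLS-backed API:

```rust
use std::collections::BTreeSet;

/// Stand-in for a quorum certificate; the real CommitQC aggregates
/// BLS signatures and tracks signers in a bit vector.
struct QuorumCert {
    message: String,
    signers: BTreeSet<usize>,
}

impl QuorumCert {
    /// Mirrors `CommitQC::add`: the signature is assumed to be
    /// already verified by the caller.
    fn add(&mut self, msg: &str, signer_index: usize) {
        if self.message != msg {
            return; // ignore votes for a different message
        }
        self.signers.insert(signer_index);
    }

    /// A quorum here is more than 2/3 of the validator set
    /// (a common BFT threshold; the real crate computes its own).
    fn has_quorum(&self, validator_count: usize) -> bool {
        self.signers.len() * 3 > validator_count * 2
    }
}
```

Keeping verification at the call site avoids verifying a signature twice when the message was already checked on receipt.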
12 changes: 4 additions & 8 deletions node/libs/roles/src/validator/messages/leader_prepare.rs
@@ -76,11 +76,10 @@ impl PrepareQC {
}

/// Add a validator's signed message.
/// * `signed_message` - A valid signed `ReplicaPrepare` message.
/// * `validator_index` - The signer index in the validator set.
/// Message is assumed to be already verified.
// TODO: check if there is already a message from that validator.
// TODO: verify the message inside instead.
pub fn add(&mut self, msg: &Signed<ReplicaPrepare>, genesis: &Genesis) {
// TODO: check if there is already a message from that validator.
// TODO: verify msg
if msg.msg.view != self.view {
return;
}
@@ -234,10 +233,7 @@ impl LeaderPrepare {
}
let (want_parent, want_number) = match high_qc {
Some(qc) => (Some(qc.header().hash()), qc.header().number.next()),
None => (
genesis.fork.first_parent,
genesis.fork.first_block,
),
None => (genesis.fork.first_parent, genesis.fork.first_block),
};
if self.proposal.parent != want_parent {
return Err(Error::BadParentHash {
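The collapsed `None` arm above decides what the leader's proposal must extend: with a `high_qc` the proposal extends the certified block, and without one it must be the fork's configured first block. A compact restatement of that rule, under simplified stand-in types (`Hash`, `BlockNumber`, `Fork` here are not the crate's definitions):

```rust
#[derive(Clone, Copy, PartialEq, Debug)]
struct BlockNumber(u64);

impl BlockNumber {
    fn next(self) -> Self {
        BlockNumber(self.0 + 1)
    }
}

type Hash = [u8; 32];

/// Simplified fork descriptor.
struct Fork {
    first_block: BlockNumber,
    first_parent: Option<Hash>,
}

/// Returns the (parent, number) a valid proposal must have.
fn expected_proposal(
    high_qc: Option<(Hash, BlockNumber)>, // hash and number of the QC'd header
    fork: &Fork,
) -> (Option<Hash>, BlockNumber) {
    match high_qc {
        // Extend the highest certified block.
        Some((hash, number)) => (Some(hash), number.next()),
        // No QC yet: start at the fork boundary.
        None => (fork.first_parent, fork.first_block),
    }
}
```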
6 changes: 2 additions & 4 deletions node/libs/roles/src/validator/messages/replica_prepare.rs
@@ -50,15 +50,13 @@ impl ReplicaPrepare {
if self.view.number <= v.view.number {
return Err(Error::HighVoteFutureView);
}
v.verify(genesis)
.map_err(Error::HighVote)?;
v.verify(genesis).map_err(Error::HighVote)?;
}
if let Some(qc) = &self.high_qc {
if self.view.number <= qc.view().number {
return Err(Error::HighQCFutureView);
}
qc.verify(genesis)
.map_err(Error::HighQC)?;
qc.verify(genesis).map_err(Error::HighQC)?;
}
Ok(())
}
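Both checks enforce the same invariant: a `ReplicaPrepare` for view `v` may only carry a high vote or high QC from a strictly earlier view. A self-contained sketch of the rule (type and error names are stand-ins mirroring the ones above):

```rust
#[derive(Clone, Copy, PartialEq, PartialOrd)]
struct ViewNumber(u64);

#[derive(Debug)]
enum Error {
    HighVoteFutureView,
    HighQCFutureView,
}

/// The high vote and high QC must come from views strictly
/// earlier than the message's own view.
fn check_views(
    view: ViewNumber,
    high_vote: Option<ViewNumber>,
    high_qc: Option<ViewNumber>,
) -> Result<(), Error> {
    if let Some(v) = high_vote {
        if view <= v {
            return Err(Error::HighVoteFutureView);
        }
    }
    if let Some(qc) = high_qc {
        if view <= qc {
            return Err(Error::HighQCFutureView);
        }
    }
    Ok(())
}
```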
9 changes: 4 additions & 5 deletions node/libs/roles/src/validator/testonly.rs
@@ -1,10 +1,9 @@
//! Test-only utilities.
use super::{
AggregateSignature, BlockHeader, BlockHeaderHash, BlockNumber, CommitQC, ConsensusMsg,
FinalBlock, Fork, ForkNumber, Genesis, GenesisHash, LeaderCommit, LeaderPrepare, Msg,
MsgHash, NetAddress, Payload, PayloadHash, Phase, PrepareQC, ProtocolVersion, PublicKey,
ReplicaCommit, ReplicaPrepare, SecretKey, Signature, Signed, Signers, ValidatorSet, View,
ViewNumber,
FinalBlock, Fork, ForkNumber, Genesis, GenesisHash, LeaderCommit, LeaderPrepare, Msg, MsgHash,
NetAddress, Payload, PayloadHash, Phase, PrepareQC, ProtocolVersion, PublicKey, ReplicaCommit,
ReplicaPrepare, SecretKey, Signature, Signed, Signers, ValidatorSet, View, ViewNumber,
};
use bit_vec::BitVec;
use rand::{
@@ -41,7 +40,7 @@ impl Setup {
first_block: BlockNumber(rng.gen_range(0..100)),
first_parent: Some(rng.gen()),
};
Self::new_with_fork(rng,validators,fork)
Self::new_with_fork(rng, validators, fork)
}

/// Next block to finalize.
32 changes: 17 additions & 15 deletions node/libs/storage/src/block_store/mod.rs
@@ -149,8 +149,7 @@ impl BlockStore {
let last = persistent.last(ctx).await.wrap("persistent.last()")?;
t.observe();
if let Some(last) = &last {
last.verify(&genesis)
.context("last.verify()")?;
last.verify(&genesis).context("last.verify()")?;
}
let state = BlockStoreState {
first: genesis.fork.first_block,
@@ -168,7 +167,9 @@
});
// Verify the first block.
if let Some(block) = this.block(ctx, this.genesis.fork.first_block).await? {
block.verify(&this.genesis).with_context(|| format!("verify({:?})", this.genesis.fork.first_block))?;
block
.verify(&this.genesis)
.with_context(|| format!("verify({:?})", this.genesis.fork.first_block))?;
}
Ok((this.clone(), BlockStoreRunner(this)))
}
@@ -217,25 +218,26 @@ impl BlockStore {
ctx: &ctx::Ctx,
block: validator::FinalBlock,
) -> ctx::Result<()> {
let number = block.header().number;
let number = block.number();
{
let sub = &mut self.subscribe();
let queued_state =
sync::wait_for(ctx, sub, |queued_state| queued_state.next() >= number).await?;
if queued_state.next() != number {
if queued_state.next() > number {
return Ok(());
}
if queued_state.last.as_ref().map(|qc| qc.header().hash()) != block.header().parent {
return Err(
anyhow::format_err!("block.parent doesn't match the previous block").into(),
);
block.verify(&self.genesis).context("block.verify()")?;
// Verify parent hash, if previous block is available.
if let Some(last) = queued_state.last.as_ref() {
if Some(last.header().hash()) != block.header().parent {
return Err(anyhow::format_err!(
"block.parent = {:?}, want {:?}",
block.header().parent,
last.header().hash()
)
.into());
}
}
// Verify the message without verifying the signatures, to avoid doing it twice.
// TODO(gprusak): consider moving the signature verification here for resiliency.
block
.justification
.message
.verify(&self.genesis)?;
}
self.inner.send_if_modified(|inner| {
let modified = inner.queued_state.send_if_modified(|queued_state| {
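The rewritten `queue_block` above waits until the block is next in line, treats an already-queued number as a no-op, and checks the parent hash only when a previous block is available (it may not be, right after the fork boundary). A single-threaded model of that invariant, with a `u64` id standing in for the header hash (no `ctx`, no `sync::wait_for`):

```rust
#[derive(Clone)]
struct Block {
    number: u64,
    parent: Option<u64>, // stand-in for the parent header's hash
    id: u64,             // stand-in for this block's header hash
}

struct Queue {
    first: u64, // first block number of the fork
    last: Option<Block>,
}

impl Queue {
    /// Number of the next block to queue.
    fn next(&self) -> u64 {
        self.last.as_ref().map_or(self.first, |b| b.number + 1)
    }

    fn queue_block(&mut self, block: Block) -> Result<(), String> {
        // Already queued: nothing to do (the real code returns Ok here
        // after waiting for queued_state.next() >= block.number).
        if self.next() > block.number {
            return Ok(());
        }
        if self.next() < block.number {
            // The real code blocks on sync::wait_for until this holds.
            return Err(format!("expected block {}, got {}", self.next(), block.number));
        }
        // Verify the parent hash, if the previous block is available.
        if let Some(last) = &self.last {
            if Some(last.id) != block.parent {
                return Err(format!(
                    "block.parent = {:?}, want {:?}",
                    block.parent,
                    Some(last.id),
                ));
            }
        }
        self.last = Some(block);
        Ok(())
    }
}
```

Per the comment in the hunk, signature verification is deliberately kept out of this path to avoid doing it twice; the TODO notes it could be moved here for resiliency.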
14 changes: 0 additions & 14 deletions node/libs/storage/src/testonly/in_memory.rs
@@ -30,20 +30,6 @@ impl BlockStore {
blocks: Mutex::default(),
}))
}

/*/// Forks the storage.
pub fn fork(&self, fork: validator::Fork) -> anyhow::Result<Self> {
let mut genesis = self.0.genesis.clone();
genesis.forks.push(fork)?;
let mut blocks = self.0.blocks.lock().unwrap().clone();
if let Some(first) = blocks.front().map(|b| b.header().number.0) {
blocks.truncate(genesis.forks.current().first_block.0.saturating_sub(first) as usize);
}
Ok(Self(Arc::new(BlockStoreInner {
genesis,
blocks: Mutex::new(blocks),
})))
}*/
}

#[async_trait::async_trait]