diff --git a/node/Cargo.lock b/node/Cargo.lock index 597d3880..1d756cc4 100644 --- a/node/Cargo.lock +++ b/node/Cargo.lock @@ -19,30 +19,30 @@ checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" [[package]] name = "aead" -version = "0.4.3" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b613b8e1e3cf911a086f53f03bf286f52fd7a7258e4fa606f0ef220d39d8877" +checksum = "d122413f284cf2d62fb1b7db97e02edb8cda96d769b16e443a4f6195e35662b0" dependencies = [ + "crypto-common", "generic-array", ] [[package]] name = "aes" -version = "0.7.5" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e8b47f52ea9bae42228d07ec09eb676433d7c4ed1ebdf0f1d1c29ed446f1ab8" +checksum = "ac1f845298e95f983ff1944b728ae08b8cebab80d684f0a832ed0fc74dfa27e2" dependencies = [ "cfg-if", "cipher", "cpufeatures", - "opaque-debug", ] [[package]] name = "aes-gcm" -version = "0.9.2" +version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc3be92e19a7ef47457b8e6f90707e12b6ac5d20c6f3866584fa3be0787d839f" +checksum = "831010a0f742e1209b3bcea8fab6a8e149051ba6099432c8cb2cc117dec3ead1" dependencies = [ "aead", "aes", @@ -317,21 +317,20 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chacha20" -version = "0.8.2" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c80e5460aa66fe3b91d40bcbdab953a597b60053e34d684ac6903f863b680a6" +checksum = "c3613f74bd2eac03dad61bd53dbe620703d4371614fe0bc3b9f04dd36fe4e818" dependencies = [ "cfg-if", "cipher", "cpufeatures", - "zeroize", ] [[package]] name = "chacha20poly1305" -version = "0.9.1" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a18446b09be63d457bbec447509e85f662f32952b035ce892290396bc0b0cff5" +checksum = "10cd79432192d1c0f4e1a0fef9527696cc039165d729fb41b3f4f4f354c2dc35" dependencies = [ "aead", "chacha20", @@ -369,11 +368,13 @@ dependencies = [ [[package]] name = "cipher" -version = "0.3.0" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ee52072ec15386f770805afd189a01c8841be8696bed250fa2f13c4c0d6dfb7" +checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" dependencies = [ - "generic-array", + "crypto-common", + "inout", + "zeroize", ] [[package]] @@ -389,9 +390,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.4.7" +version = "4.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac495e00dcec98c83465d5ad66c5c4fabd652fd6686e7c6269b117e729a6f17b" +checksum = "2275f18819641850fa26c89acc84d465c1bf91ce57bc2748b28c420473352f64" dependencies = [ "clap_builder", "clap_derive", @@ -399,9 +400,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.4.7" +version = "4.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c77ed9a32a62e6ca27175d00d29d05ca32e396ea1eb5fb01d8256b669cec7663" +checksum = "07cdf1b148b25c1e1f7a42225e30a0d99a615cd4637eae7365548dd4529b95bc" dependencies = [ "anstream", "anstyle", @@ -524,14 +525,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ "generic-array", + "rand_core 0.6.4", "typenum", ] [[package]] name = "ctr" -version = "0.7.0" +version = "0.9.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a232f92a03f37dd7d7dd2adc67166c77e9cd88de5b019b9a9eecfaeaf7bfd481" +checksum = "0369ee1ad671834580515889b80f2ea915f23b8be8d0daa4bbaf2ac5c7590835" dependencies = [ "cipher", ] @@ -618,15 +620,16 @@ dependencies = [ [[package]] name = "ed25519-dalek" -version = "2.0.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7277392b266383ef8396db7fdeb1e77b6c52fed775f5df15bb24f35b72156980" +checksum = "1f628eaec48bfd21b865dc2950cfa014450c01d2fa2b69a86c2fd5844ec523c0" dependencies = [ "curve25519-dalek", "ed25519", "rand_core 0.6.4", "serde", "sha2", + "subtle", "zeroize", ] @@ -653,9 +656,9 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "errno" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c18ee0ed65a5f1f81cac6b1d213b69c35fa47d4252ad41f1486dbd8226fe36e" +checksum = "f258a7194e7f7c2a7837a8913aeab7fd8c383457034fa20ce4dd3dcb813e8eb8" dependencies = [ "libc", "windows-sys", @@ -697,9 +700,9 @@ dependencies = [ [[package]] name = "fiat-crypto" -version = "0.2.3" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f69037fe1b785e84986b4f2cbcf647381876a00671d25ceef715d7812dd7e1dd" +checksum = "27573eac26f4dd11e2b1916c3fe1baa56407c83c71a773a8ba17ec0bca03b6b7" [[package]] name = "fixedbitset" @@ -775,9 +778,9 @@ dependencies = [ [[package]] name = "ghash" -version = "0.4.4" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1583cc1656d7839fd3732b80cf4f38850336cdb9b8ded1cd399ca62958de3c99" +checksum = "d930750de5717d2dd0b8c0d42c076c0e884c81a73e6cab859bbd2339c71e3e40" dependencies = [ "opaque-debug", "polyval", @@ -785,9 +788,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.28.0" +version = "0.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fb8d784f27acf97159b40fc4db5ecd8aa23b9ad5ef69cdd136d3bc80665f0c0" +checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" [[package]] name = "glob" @@ -803,9 +806,9 @@ checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7" [[package]] name = "hashbrown" -version = "0.14.2" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f93e7192158dbcda357bdec5fb5788eebf8bbac027f3f33e719d29135ae84156" +checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" [[package]] name = "heck" @@ -836,9 +839,9 @@ dependencies = [ [[package]] name = "http" -version = "0.2.9" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd6effc99afb63425aff9b05836f029929e345a6148a14b7ecd5ab67af944482" +checksum = "8947b1a6fad4393052c7ba1f4cd97bed3e953a95c79c92ad9b051a04611d9fbb" dependencies = [ "bytes", "fnv", @@ -915,6 +918,15 @@ dependencies = [ "hashbrown", ] +[[package]] +name = "inout" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0c10553d664a4d0bcff9f4215d0aac67a639cc68ef660840afe309b807bc9f5" +dependencies = [ + "generic-array", +] + [[package]] name = "is-terminal" version = "0.4.9" @@ -1432,9 +1444,9 @@ dependencies = [ [[package]] name = "poly1305" -version = "0.7.2" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"048aeb476be11a4b6ca432ca569e375810de9294ae78f4774e78ea98a9246ede" +checksum = "8159bd90725d2df49889a078b54f4f79e87f1f8a8444194cdca81d38f5393abf" dependencies = [ "cpufeatures", "opaque-debug", @@ -1443,9 +1455,9 @@ dependencies = [ [[package]] name = "polyval" -version = "0.5.3" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8419d2b623c7c0896ff2d5d96e2cb4ede590fed28fcc34934f4c33c036e620a1" +checksum = "d52cff9d1d4dee5fe6d03729099f4a310a41179e0a10dbf542039873f2e826fb" dependencies = [ "cfg-if", "cpufeatures", @@ -1487,9 +1499,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.69" +version = "1.0.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "134c189feb4956b20f6f547d2cf727d4c0fe06722b20a0eec87ed445a97f92da" +checksum = "39278fbbf5fb4f646ce651690877f89d1c5811a3d4acb27700c1cb3cdb78fd3b" dependencies = [ "unicode-ident", ] @@ -1519,9 +1531,9 @@ dependencies = [ [[package]] name = "prost" -version = "0.12.1" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4fdd22f3b9c31b53c060df4a0613a1c7f062d4115a2b984dd15b1858f7e340d" +checksum = "146c289cda302b98a28d40c8b3b90498d6e526dd24ac2ecea73e4e491685b94a" dependencies = [ "bytes", "prost-derive", @@ -1529,9 +1541,9 @@ dependencies = [ [[package]] name = "prost-build" -version = "0.12.1" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bdf592881d821b83d471f8af290226c8d51402259e9bb5be7f9f8bdebbb11ac" +checksum = "c55e02e35260070b6f716a2423c2ff1c3bb1642ddca6f99e1f26d06268a0e2d2" dependencies = [ "bytes", "heck", @@ -1551,9 +1563,9 @@ dependencies = [ [[package]] name = "prost-derive" -version = "0.12.1" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "265baba7fabd416cf5078179f7d2cbeca4ce7a9041111900675ea7c4cb8a4c32" +checksum = "efb6c9a1dd1def8e2124d17e83a20af56f1570d6c2d2bd9e266ccb768df3840e" dependencies = [ "anyhow", "itertools 0.11.0", @@ -1580,9 +1592,9 @@ dependencies = [ [[package]] name = "prost-types" -version = "0.12.1" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e081b29f63d83a4bc75cfc9f3fe424f9156cf92d8a4f0c9407cce9a1b67327cf" +checksum = "193898f59edcf43c26227dcd4c8427f00d99d61e95dcde58dabd49fa291d470e" dependencies = [ "prost", ] @@ -1814,9 +1826,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.21" +version = "0.38.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b426b0506e5d50a7d8dafcf2e81471400deb602392c7dd110815afb4eaf02a3" +checksum = "dc99bc2d4f1fed22595588a013687477aedf3cdcfb26558c559edb67b4d9b22e" dependencies = [ "bitflags 2.4.1", "errno", @@ -1854,9 +1866,9 @@ checksum = "836fa6a3e1e547f9a2c4040802ec865b5d85f4014efe00555d7090a3dcaa1090" [[package]] name = "serde" -version = "1.0.192" +version = "1.0.193" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bca2a08484b285dcb282d0f67b26cadc0df8b19f8c12502c13d966bf9482f001" +checksum = "25dd9975e68d0cb5aa1120c288333fc98731bd1dd12f561e468ea4728c042b89" dependencies = [ "serde_derive", ] @@ -1873,9 +1885,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.192" +version = "1.0.193" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6c7207fbec9faa48073f3e3074cbe553af6ea512d7c21ba46e434e70ea9fbc1" +checksum = 
"43576ca501357b9b071ac53cdc7da8ef0cbd9493d8df094cd821777ea6e894d3" dependencies = [ "proc-macro2", "quote", @@ -1940,9 +1952,12 @@ dependencies = [ [[package]] name = "signature" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e1788eed21689f9cf370582dfc467ef36ed9c707f073528ddafa8d83e3b8500" +checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" +dependencies = [ + "rand_core 0.6.4", +] [[package]] name = "sized-chunks" @@ -1962,9 +1977,9 @@ checksum = "4dccd0940a2dcdf68d092b8cbab7dc0ad8fa938bf95787e1b916b0e3d0e8e970" [[package]] name = "snow" -version = "0.9.3" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c9d1425eb528a21de2755c75af4c9b5d57f50a0d4c3b7f1828a4cd03f8ba155" +checksum = "58021967fd0a5eeeb23b08df6cc244a4d4a5b4aec1d27c9e02fad1a58b4cd74e" dependencies = [ "aes-gcm", "blake2", @@ -2216,9 +2231,9 @@ dependencies = [ [[package]] name = "tracing-log" -version = "0.1.4" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f751112709b4e791d8ce53e32c4ed2d353565a795ce84da2285393f41557bdf2" +checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" dependencies = [ "log", "once_cell", @@ -2227,9 +2242,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.17" +version = "0.3.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30a651bc37f915e81f087d86e62a18eec5f79550c7faff886f7090b4ea757c77" +checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b" dependencies = [ "matchers", "nu-ansi-term", @@ -2269,11 +2284,11 @@ checksum = "e51733f11c9c4f72aa0c160008246859e340b00807569a0da0e7a1079b27ba85" [[package]] name = "universal-hash" -version = "0.4.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8326b2c654932e3e4f9196e69d08fdf7cfd718e1dc6f66b347e6024a0c961402" +checksum = "fc1de2c688dc15305988b563c3854064043356019f97a4b46276fe734c4f07ea" dependencies = [ - "generic-array", + "crypto-common", "subtle", ] @@ -2541,9 +2556,9 @@ checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec" [[package]] name = "zeroize" -version = "1.6.0" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9" +checksum = "525b4ec142c6b68a2d10f01f7bbf6755599ca3f81ea53b8431b7dd348f5fdb2d" dependencies = [ "zeroize_derive", ] @@ -2581,6 +2596,7 @@ name = "zksync_consensus_bft" version = "0.1.0" dependencies = [ "anyhow", + "assert_matches", "once_cell", "rand 0.8.5", "thiserror", diff --git a/node/actors/bft/Cargo.toml b/node/actors/bft/Cargo.toml index 82110ac5..2e08c789 100644 --- a/node/actors/bft/Cargo.toml +++ b/node/actors/bft/Cargo.toml @@ -24,3 +24,4 @@ vise.workspace = true [dev-dependencies] tokio.workspace = true +assert_matches.workspace = true diff --git a/node/actors/bft/src/leader/mod.rs b/node/actors/bft/src/leader/mod.rs index 55a609a6..6cff3091 100644 --- a/node/actors/bft/src/leader/mod.rs +++ b/node/actors/bft/src/leader/mod.rs @@ -8,4 +8,8 @@ mod state_machine; #[cfg(test)] mod tests; -pub(crate) use state_machine::StateMachine; +#[cfg(test)] +pub(crate) use self::replica_commit::Error as ReplicaCommitError; +#[cfg(test)] +pub(crate) use self::replica_prepare::Error as ReplicaPrepareError; +pub(crate) use self::state_machine::StateMachine; diff --git 
a/node/actors/bft/src/leader/tests.rs b/node/actors/bft/src/leader/tests.rs index 3222ffb8..fc291943 100644 --- a/node/actors/bft/src/leader/tests.rs +++ b/node/actors/bft/src/leader/tests.rs @@ -1,43 +1,470 @@ -use crate::testonly; -use rand::{rngs::StdRng, Rng, SeedableRng}; -use zksync_concurrency::ctx; -use zksync_consensus_roles::validator; - -// TODO(bruno): This only tests a particular case, not the whole method. -#[tokio::test] -async fn replica_commit() { - let ctx = &ctx::test_root(&ctx::RealClock); - let rng = &mut StdRng::seed_from_u64(6516565651); - - let keys: Vec<_> = (0..1).map(|_| rng.gen()).collect(); - let (genesis, val_set) = testonly::make_genesis( - &keys, - validator::ProtocolVersion::EARLIEST, - validator::Payload(vec![]), - ); - let (mut consensus, _) = testonly::make_consensus(ctx, &keys[0], &val_set, &genesis).await; - - consensus.leader.view = validator::ViewNumber(3); - consensus.leader.phase = validator::Phase::Commit; - - let test_replica_msg = - consensus - .inner - .secret_key - .sign_msg(validator::ConsensusMsg::ReplicaCommit( - validator::ReplicaCommit { - protocol_version: validator::ProtocolVersion::EARLIEST, - view: consensus.leader.view, - proposal: rng.gen(), - }, - )); - - match consensus.leader.process_replica_commit( - ctx, - &consensus.inner, - test_replica_msg.cast().unwrap(), - ) { - Err(super::replica_commit::Error::UnexpectedProposal) => {} - res => panic!("unexpected result {res:?}"), - } +use super::{ + replica_commit::Error as ReplicaCommitError, replica_prepare::Error as ReplicaPrepareError, +}; +use crate::testonly::ut_harness::UTHarness; +use assert_matches::assert_matches; +use rand::Rng; +use zksync_consensus_roles::validator::{ + self, CommitQC, ConsensusMsg, LeaderCommit, LeaderPrepare, Phase, PrepareQC, ProtocolVersion, + ReplicaCommit, ReplicaPrepare, ViewNumber, +}; + +#[tokio::test] +async fn replica_prepare_sanity() { + let mut util = UTHarness::new_many().await; + + util.set_view(util.owner_as_view_leader()); + + let replica_prepare = util.new_current_replica_prepare(|_| {}).cast().unwrap().msg; + util.dispatch_replica_prepare_many( + vec![replica_prepare; util.consensus_threshold()], + util.keys(), + ) + .unwrap(); +} + +#[tokio::test] +async fn replica_prepare_sanity_yield_leader_prepare() { + let mut util = UTHarness::new_one().await; + + let replica_prepare = util.new_current_replica_prepare(|_| {}); + util.dispatch_replica_prepare_one(replica_prepare.clone()) + .unwrap(); + let leader_prepare = util + .recv_signed() + .await + .unwrap() + .cast::() + .unwrap() + .msg; + + let replica_prepare = replica_prepare.cast::().unwrap().msg; + assert_matches!( + leader_prepare, + LeaderPrepare { + protocol_version, + view, + proposal, + proposal_payload: _, + justification, + } => { + assert_eq!(protocol_version, replica_prepare.protocol_version); + assert_eq!(view, replica_prepare.view); + assert_eq!(proposal.parent, replica_prepare.high_vote.proposal.hash()); + assert_eq!(justification, util.new_prepare_qc(|msg| *msg = replica_prepare)); + } + ); +} + +#[tokio::test] +async fn replica_prepare_sanity_yield_leader_prepare_reproposal() { + let mut util = UTHarness::new_many().await; + + util.set_view(util.owner_as_view_leader()); + + let replica_prepare: ReplicaPrepare = + util.new_unfinalized_replica_prepare().cast().unwrap().msg; + util.dispatch_replica_prepare_many( + vec![replica_prepare.clone(); util.consensus_threshold()], + util.keys(), + ) + .unwrap(); + let leader_prepare = util + .recv_signed() + .await + .unwrap() + 
.cast::() + .unwrap() + .msg; + + assert_matches!( + leader_prepare, + LeaderPrepare { + protocol_version, + view, + proposal, + proposal_payload, + justification, + } => { + assert_eq!(protocol_version, replica_prepare.protocol_version); + assert_eq!(view, replica_prepare.view); + assert_eq!(proposal, replica_prepare.high_vote.proposal); + assert_eq!(proposal_payload, None); + assert_matches!( + justification, + PrepareQC { map, .. } => { + assert_eq!(map.len(), 1); + assert_eq!(*map.first_key_value().unwrap().0, replica_prepare); + } + ); + } + ); +} + +#[tokio::test] +async fn replica_prepare_old_view() { + let mut util = UTHarness::new_one().await; + + util.set_replica_view(ViewNumber(1)); + util.set_leader_view(ViewNumber(2)); + util.set_leader_phase(Phase::Prepare); + + let replica_prepare = util.new_current_replica_prepare(|_| {}); + let res = util.dispatch_replica_prepare_one(replica_prepare); + assert_matches!( + res, + Err(ReplicaPrepareError::Old { + current_view: ViewNumber(2), + current_phase: Phase::Prepare, + }) + ); +} + +#[tokio::test] +async fn replica_prepare_during_commit() { + let mut util = UTHarness::new_one().await; + + util.set_leader_phase(Phase::Commit); + + let replica_prepare = util.new_current_replica_prepare(|_| {}); + let res = util.dispatch_replica_prepare_one(replica_prepare); + assert_matches!( + res, + Err(ReplicaPrepareError::Old { + current_view: ViewNumber(1), + current_phase: Phase::Commit, + }) + ); +} + +#[tokio::test] +async fn replica_prepare_not_leader_in_view() { + let mut util = UTHarness::new_with(2).await; + + let current_view_leader = util.view_leader(util.current_replica_view()); + assert_ne!(current_view_leader, util.owner_key().public()); + + let replica_prepare = util.new_current_replica_prepare(|_| {}); + let res = util.dispatch_replica_prepare_one(replica_prepare.clone()); + assert_matches!(res, Err(ReplicaPrepareError::NotLeaderInView)); +} + +#[tokio::test] +async fn replica_prepare_already_exists() { + let mut util = UTHarness::new_with(2).await; + + let view = ViewNumber(2); + util.set_replica_view(view); + util.set_leader_view(view); + assert_eq!(util.view_leader(view), util.owner_key().public()); + + let replica_prepare = util.new_current_replica_prepare(|_| {}); + let _ = util.dispatch_replica_prepare_one(replica_prepare.clone()); + let res = util.dispatch_replica_prepare_one(replica_prepare.clone()); + assert_matches!( + res, + Err(ReplicaPrepareError::Exists { existing_message }) => { + assert_eq!(existing_message, replica_prepare.cast().unwrap().msg); + } + ); +} + +#[tokio::test] +async fn replica_prepare_num_received_below_threshold() { + let mut util = UTHarness::new_with(2).await; + + let view = ViewNumber(2); + util.set_replica_view(view); + util.set_leader_view(view); + assert_eq!(util.view_leader(view), util.owner_key().public()); + + let replica_prepare = util.new_current_replica_prepare(|_| {}); + let res = util.dispatch_replica_prepare_one(replica_prepare); + assert_matches!( + res, + Err(ReplicaPrepareError::NumReceivedBelowThreshold { + num_messages: 1, + threshold: 2 + }) + ); +} + +#[tokio::test] +async fn replica_prepare_invalid_sig() { + let mut util = UTHarness::new_one().await; + + let mut replica_prepare = util.new_current_replica_prepare(|_| {}); + replica_prepare.sig = util.rng().gen(); + let res = util.dispatch_replica_prepare_one(replica_prepare); + assert_matches!(res, Err(ReplicaPrepareError::InvalidSignature(_))); +} + +#[tokio::test] +async fn replica_prepare_invalid_commit_qc() { + let mut util = 
UTHarness::new_one().await; + + let junk = util.rng().gen::(); + let replica_prepare = util.new_current_replica_prepare(|msg| msg.high_qc = junk); + let res = util.dispatch_replica_prepare_one(replica_prepare); + assert_matches!(res, Err(ReplicaPrepareError::InvalidHighQC(..))); +} + +#[tokio::test] +async fn replica_prepare_high_qc_of_current_view() { + let mut util = UTHarness::new_one().await; + + let view = ViewNumber(1); + let qc_view = ViewNumber(1); + + util.set_view(view); + let qc = util.new_commit_qc(|msg| { + msg.view = qc_view; + }); + let replica_prepare = util.new_current_replica_prepare(|msg| msg.high_qc = qc); + let res = util.dispatch_replica_prepare_one(replica_prepare); + assert_matches!( + res, + Err(ReplicaPrepareError::HighQCOfFutureView { high_qc_view, current_view }) => { + assert_eq!(high_qc_view, qc_view); + assert_eq!(current_view, view); + } + ); +} + +#[tokio::test] +async fn replica_prepare_high_qc_of_future_view() { + let mut util = UTHarness::new_one().await; + + let view = ViewNumber(1); + let qc_view = ViewNumber(2); + + util.set_view(view); + let qc = util.new_commit_qc(|msg| { + msg.view = qc_view; + }); + + let replica_prepare = util.new_current_replica_prepare(|msg| msg.high_qc = qc); + let res = util.dispatch_replica_prepare_one(replica_prepare); + assert_matches!( + res, + Err(ReplicaPrepareError::HighQCOfFutureView{ high_qc_view, current_view }) => { + assert_eq!(high_qc_view, qc_view); + assert_eq!(current_view, view); + } + ); +} + +#[ignore = "fails/unsupported"] +#[tokio::test] +async fn replica_prepare_non_validator_signer() { + let mut util = UTHarness::new_with(2).await; + + let view = ViewNumber(2); + util.set_view(view); + assert_eq!(util.view_leader(view), util.key_at(0).public()); + + let replica_prepare = util.new_current_replica_prepare(|_| {}); + let _ = util.dispatch_replica_prepare_one(replica_prepare.clone()); + + let non_validator: validator::SecretKey = util.rng().gen(); + let replica_prepare = non_validator.sign_msg(replica_prepare.msg); + util.dispatch_replica_prepare_one(replica_prepare).unwrap(); + // PANICS: + // "Couldn't create justification from valid replica messages!: Message signer isn't in the validator set" +} + +#[tokio::test] +async fn replica_commit_sanity() { + let mut util = UTHarness::new_many().await; + + util.set_view(util.owner_as_view_leader()); + + let replica_commit = util + .new_procedural_replica_commit_many() + .await + .cast() + .unwrap() + .msg; + util.dispatch_replica_commit_many( + vec![replica_commit; util.consensus_threshold()], + util.keys(), + ) + .unwrap(); +} + +#[tokio::test] +async fn replica_commit_sanity_yield_leader_commit() { + let mut util = UTHarness::new_one().await; + + let replica_commit = util.new_procedural_replica_commit_one().await; + util.dispatch_replica_commit_one(replica_commit.clone()) + .unwrap(); + let leader_commit = util + .recv_signed() + .await + .unwrap() + .cast::() + .unwrap() + .msg; + + let replica_commit = replica_commit.cast::().unwrap().msg; + assert_matches!( + leader_commit, + LeaderCommit { + protocol_version, + justification, + } => { + assert_eq!(protocol_version, replica_commit.protocol_version); + assert_eq!(justification, util.new_commit_qc(|msg| *msg = replica_commit)); + } + ); +} + +#[tokio::test] +async fn replica_commit_old() { + let mut util = UTHarness::new_one().await; + + let mut replica_commit = util + .new_procedural_replica_commit_one() + .await + .cast::() + .unwrap() + .msg; + replica_commit.view = util.current_replica_view().prev(); + 
let replica_commit = util + .owner_key() + .sign_msg(ConsensusMsg::ReplicaCommit(replica_commit)); + + let res = util.dispatch_replica_commit_one(replica_commit); + assert_matches!( + res, + Err(ReplicaCommitError::Old { current_view, current_phase }) => { + assert_eq!(current_view, util.current_replica_view()); + assert_eq!(current_phase, util.current_replica_phase()); + } + ); +} + +#[tokio::test] +async fn replica_commit_not_leader_in_view() { + let mut util = UTHarness::new_with(2).await; + + let current_view_leader = util.view_leader(util.current_replica_view()); + assert_ne!(current_view_leader, util.owner_key().public()); + + let replica_commit = util.new_current_replica_commit(|_| {}); + let res = util.dispatch_replica_commit_one(replica_commit); + assert_matches!(res, Err(ReplicaCommitError::NotLeaderInView)); +} + +#[tokio::test] +async fn replica_commit_already_exists() { + let mut util = UTHarness::new_with(2).await; + + let view = ViewNumber(2); + util.set_replica_view(view); + util.set_leader_view(view); + assert_eq!(util.view_leader(view), util.owner_key().public()); + + let replica_prepare_one = util.new_current_replica_prepare(|_| {}); + let _ = util.dispatch_replica_prepare_one(replica_prepare_one.clone()); + let replica_prepare_two = util.key_at(1).sign_msg(replica_prepare_one.msg); + util.dispatch_replica_prepare_one(replica_prepare_two) + .unwrap(); + + let leader_prepare = util.recv_signed().await.unwrap(); + util.dispatch_leader_prepare(leader_prepare).await.unwrap(); + + let replica_commit = util.recv_signed().await.unwrap(); + let _ = util.dispatch_replica_commit_one(replica_commit.clone()); + let res = util.dispatch_replica_commit_one(replica_commit.clone()); + assert_matches!( + res, + Err(ReplicaCommitError::DuplicateMessage { existing_message }) => { + assert_eq!(existing_message, replica_commit.cast::().unwrap().msg) + } + ); +} + +#[tokio::test] +async fn replica_commit_num_received_below_threshold() { + let mut util = UTHarness::new_with(2).await; + + let view = ViewNumber(2); + util.set_replica_view(view); + util.set_leader_view(view); + assert_eq!(util.view_leader(view), util.owner_key().public()); + + let replica_prepare_one = util.new_current_replica_prepare(|_| {}); + let _ = util.dispatch_replica_prepare_one(replica_prepare_one.clone()); + let replica_prepare_two = util.key_at(1).sign_msg(replica_prepare_one.msg); + util.dispatch_replica_prepare_one(replica_prepare_two) + .unwrap(); + + let leader_prepare = util.recv_signed().await.unwrap(); + util.dispatch_leader_prepare(leader_prepare).await.unwrap(); + + let replica_commit = util.recv_signed().await.unwrap(); + let res = util.dispatch_replica_commit_one(replica_commit.clone()); + assert_matches!( + res, + Err(ReplicaCommitError::NumReceivedBelowThreshold { + num_messages: 1, + threshold: 2 + }) + ); +} + +#[tokio::test] +async fn replica_commit_invalid_sig() { + let mut util = UTHarness::new_one().await; + + let mut replica_commit = util.new_current_replica_commit(|_| {}); + replica_commit.sig = util.rng().gen(); + let res = util.dispatch_replica_commit_one(replica_commit); + assert_matches!(res, Err(ReplicaCommitError::InvalidSignature(..))); +} + +#[tokio::test] +async fn replica_commit_unexpected_proposal() { + let mut util = UTHarness::new_one().await; + + let replica_commit = util.new_current_replica_commit(|_| {}); + let res = util.dispatch_replica_commit_one(replica_commit); + assert_matches!(res, Err(ReplicaCommitError::UnexpectedProposal)); +} + +#[ignore = "fails/unsupported"] 
+#[tokio::test] +async fn replica_commit_protocol_version_mismatch() { + let mut util = UTHarness::new_with(2).await; + + let view = ViewNumber(2); + util.set_replica_view(view); + util.set_leader_view(view); + assert_eq!(util.view_leader(view), util.owner_key().public()); + + let replica_prepare_one = util.new_current_replica_prepare(|_| {}); + let _ = util.dispatch_replica_prepare_one(replica_prepare_one.clone()); + let replica_prepare_two = util.key_at(1).sign_msg(replica_prepare_one.msg); + util.dispatch_replica_prepare_one(replica_prepare_two) + .unwrap(); + + let leader_prepare = util.recv_signed().await.unwrap(); + util.dispatch_leader_prepare(leader_prepare).await.unwrap(); + + let replica_commit = util.recv_signed().await.unwrap(); + let _ = util.dispatch_replica_commit_one(replica_commit.clone()); + + let mut replica_commit_two = replica_commit.cast::().unwrap().msg; + replica_commit_two.protocol_version = + ProtocolVersion(replica_commit_two.protocol_version.0 + 1); + + let replica_commit_two = util + .key_at(1) + .sign_msg(ConsensusMsg::ReplicaCommit(replica_commit_two)); + util.dispatch_replica_commit_one(replica_commit_two) + .unwrap(); + // PANICS: + // "Couldn't create justification from valid replica messages!: CommitQC can only be created from votes for the same message." } diff --git a/node/actors/bft/src/replica/mod.rs b/node/actors/bft/src/replica/mod.rs index e76e045f..69ede4b0 100644 --- a/node/actors/bft/src/replica/mod.rs +++ b/node/actors/bft/src/replica/mod.rs @@ -11,4 +11,8 @@ mod state_machine; mod tests; mod timer; -pub(crate) use state_machine::StateMachine; +#[cfg(test)] +pub(crate) use self::leader_commit::Error as LeaderCommitError; +#[cfg(test)] +pub(crate) use self::leader_prepare::Error as LeaderPrepareError; +pub(crate) use self::state_machine::StateMachine; diff --git a/node/actors/bft/src/replica/tests.rs b/node/actors/bft/src/replica/tests.rs index a9bec7cd..3a904618 100644 --- a/node/actors/bft/src/replica/tests.rs +++ b/node/actors/bft/src/replica/tests.rs @@ -1,58 +1,517 @@ -use crate::testonly; +use super::{ + leader_commit::Error as LeaderCommitError, leader_prepare::Error as LeaderPrepareError, +}; +use crate::{inner::ConsensusInner, leader::ReplicaPrepareError, testonly::ut_harness::UTHarness}; +use assert_matches::assert_matches; use rand::Rng; -use zksync_concurrency::{ctx, scope, testonly::abort_on_panic, time}; -use zksync_consensus_network::io::{ConsensusInputMessage, Target}; -use zksync_consensus_roles::validator::{self, ViewNumber}; - -#[tokio::test] -async fn start_new_view_not_leader() { - abort_on_panic(); - let ctx = &ctx::test_root(&ctx::ManualClock::new()); - let rng = &mut ctx.rng(); - - let keys: Vec<_> = (0..4).map(|_| rng.gen()).collect(); - let (genesis, val_set) = testonly::make_genesis( - &keys, - validator::ProtocolVersion::EARLIEST, - validator::Payload(vec![]), - ); - let (mut consensus, mut pipe) = - testonly::make_consensus(ctx, &keys[0], &val_set, &genesis).await; - // TODO: this test assumes a specific implementation of the leader schedule. - // Make it leader-schedule agnostic (use epoch to select a specific view). 
- consensus.replica.view = ViewNumber(1); - consensus.replica.high_qc = rng.gen(); - consensus.replica.high_qc.message.view = ViewNumber(0); - - scope::run!(ctx, |ctx, s| { - s.spawn(async { - consensus - .replica - .start_new_view(ctx, &consensus.inner) - .await - .unwrap(); - Ok(()) - }) - .join(ctx) - }) - .await +use std::cell::RefCell; +use zksync_consensus_roles::validator::{ + BlockHeaderHash, ConsensusMsg, LeaderCommit, LeaderPrepare, Payload, PrepareQC, ReplicaCommit, + ReplicaPrepare, ViewNumber, +}; + +#[tokio::test] +async fn leader_prepare_sanity() { + let mut util = UTHarness::new_many().await; + + util.set_view(util.owner_as_view_leader()); + + let leader_prepare = util.new_procedural_leader_prepare_many().await; + util.dispatch_leader_prepare(leader_prepare).await.unwrap(); +} + +#[tokio::test] +async fn leader_prepare_reproposal_sanity() { + let mut util = UTHarness::new_many().await; + + util.set_view(util.owner_as_view_leader()); + + let replica_prepare: ReplicaPrepare = + util.new_unfinalized_replica_prepare().cast().unwrap().msg; + util.dispatch_replica_prepare_many( + vec![replica_prepare.clone(); util.consensus_threshold()], + util.keys(), + ) .unwrap(); + let leader_prepare_signed = util.recv_signed().await.unwrap(); + + let leader_prepare = leader_prepare_signed + .clone() + .cast::() + .unwrap() + .msg; + assert_matches!( + leader_prepare, + LeaderPrepare {proposal_payload, .. } => { + assert_eq!(proposal_payload, None); + } + ); + + util.dispatch_leader_prepare(leader_prepare_signed) + .await + .unwrap(); +} + +#[tokio::test] +async fn leader_prepare_sanity_yield_replica_commit() { + let mut util = UTHarness::new_one().await; + + let leader_prepare = util.new_procedural_leader_prepare_one().await; + util.dispatch_leader_prepare(leader_prepare.clone()) + .await + .unwrap(); + let replica_commit = util + .recv_signed() + .await + .unwrap() + .cast::() + .unwrap() + .msg; + + let leader_prepare = leader_prepare.cast::().unwrap().msg; + assert_matches!( + replica_commit, + ReplicaCommit { + protocol_version, + view, + proposal, + } => { + assert_eq!(protocol_version, leader_prepare.protocol_version); + assert_eq!(view, leader_prepare.view); + assert_eq!(proposal, leader_prepare.proposal); + } + ); +} + +#[tokio::test] +async fn leader_prepare_invalid_leader() { + let mut util = UTHarness::new_with(2).await; + + let view = ViewNumber(2); + util.set_view(view); + assert_eq!(util.view_leader(view), util.key_at(0).public()); + + let replica_prepare_one = util.new_current_replica_prepare(|_| {}); + let res = util.dispatch_replica_prepare_one(replica_prepare_one.clone()); + assert_matches!( + res, + Err(ReplicaPrepareError::NumReceivedBelowThreshold { + num_messages: 1, + threshold: 2, + }) + ); + + let replica_prepare_two = util.key_at(1).sign_msg(replica_prepare_one.msg); + util.dispatch_replica_prepare_one(replica_prepare_two) + .unwrap(); + let msg = util.recv_signed().await.unwrap(); + let mut leader_prepare = msg.cast::().unwrap().msg; + + leader_prepare.view = leader_prepare.view.next(); + assert_ne!( + util.view_leader(leader_prepare.view), + util.key_at(0).public() + ); + + let leader_prepare = util + .owner_key() + .sign_msg(ConsensusMsg::LeaderPrepare(leader_prepare)); + let res = util.dispatch_leader_prepare(leader_prepare).await; + assert_matches!( + res, + Err(LeaderPrepareError::InvalidLeader { correct_leader, received_leader }) => { + assert_eq!(correct_leader, util.key_at(1).public()); + assert_eq!(received_leader, util.key_at(0).public()); + } + ); +} + 
+#[tokio::test] +async fn leader_prepare_old_view() { + let mut util = UTHarness::new_one().await; + + let mut leader_prepare = util + .new_procedural_leader_prepare_one() + .await + .cast::() + .unwrap() + .msg; + leader_prepare.view = util.current_replica_view().prev(); + let leader_prepare = util + .owner_key() + .sign_msg(ConsensusMsg::LeaderPrepare(leader_prepare)); + + let res = util.dispatch_leader_prepare(leader_prepare).await; + assert_matches!( + res, + Err(LeaderPrepareError::Old { current_view, current_phase }) => { + assert_eq!(current_view, util.current_replica_view()); + assert_eq!(current_phase, util.current_replica_phase()); + } + ); +} + +#[tokio::test] +async fn leader_prepare_invalid_sig() { + let mut util = UTHarness::new_one().await; + + let mut leader_prepare = util.new_rnd_leader_prepare(|_| {}); + leader_prepare.sig = util.rng().gen(); + + let res = util.dispatch_leader_prepare(leader_prepare).await; + assert_matches!(res, Err(LeaderPrepareError::InvalidSignature(..))); +} + +#[tokio::test] +async fn leader_prepare_invalid_prepare_qc() { + let mut util = UTHarness::new_one().await; + + let mut leader_prepare = util + .new_procedural_leader_prepare_one() + .await + .cast::() + .unwrap() + .msg; + leader_prepare.justification = util.rng().gen::(); + let leader_prepare = util + .owner_key() + .sign_msg(ConsensusMsg::LeaderPrepare(leader_prepare)); + + let res = util.dispatch_leader_prepare(leader_prepare).await; + assert_matches!( + res, + Err(LeaderPrepareError::InvalidPrepareQC(err)) => { + assert_eq!(err.to_string(), "PrepareQC contains messages for different views!") + } + ); +} + +#[tokio::test] +async fn leader_prepare_invalid_high_qc() { + let mut util = UTHarness::new_one().await; + + let mut replica_prepare = util + .new_current_replica_prepare(|_| {}) + .cast::() + .unwrap() + .msg; + replica_prepare.high_qc = util.rng().gen(); + + let mut leader_prepare = util + .new_procedural_leader_prepare_one() + .await + .cast::() + .unwrap() + .msg; + + let high_qc = util.rng().gen(); + leader_prepare.justification = util.new_prepare_qc(|msg| msg.high_qc = high_qc); + let leader_prepare = util + .owner_key() + .sign_msg(ConsensusMsg::LeaderPrepare(leader_prepare)); + + let res = util.dispatch_leader_prepare(leader_prepare).await; + assert_matches!(res, Err(LeaderPrepareError::InvalidHighQC(_))); +} + +#[tokio::test] +async fn leader_prepare_proposal_oversized_payload() { + let mut util = UTHarness::new_one().await; + let payload_oversize = ConsensusInner::PAYLOAD_MAX_SIZE + 1; + let payload_vec = vec![0; payload_oversize]; + + let mut leader_prepare = util + .new_procedural_leader_prepare_one() + .await + .cast::() + .unwrap() + .msg; + leader_prepare.proposal_payload = Some(Payload(payload_vec)); + let leader_prepare_signed = util + .owner_key() + .sign_msg(ConsensusMsg::LeaderPrepare(leader_prepare.clone())); + + let res = util.dispatch_leader_prepare(leader_prepare_signed).await; + assert_matches!( + res, + Err(LeaderPrepareError::ProposalOversizedPayload{ payload_size, header }) => { + assert_eq!(payload_size, payload_oversize); + assert_eq!(header, leader_prepare.proposal); + } + ); +} + +#[tokio::test] +async fn leader_prepare_proposal_mismatched_payload() { + let mut util = UTHarness::new_one().await; + + let mut leader_prepare = util + .new_procedural_leader_prepare_one() + .await + .cast::() + .unwrap() + .msg; + leader_prepare.proposal_payload = Some(util.rng().gen()); + let leader_prepare_signed = util + .owner_key() + 
.sign_msg(ConsensusMsg::LeaderPrepare(leader_prepare.clone())); + + let res = util.dispatch_leader_prepare(leader_prepare_signed).await; + assert_matches!(res, Err(LeaderPrepareError::ProposalMismatchedPayload)); +} + +#[tokio::test] +async fn leader_prepare_proposal_when_previous_not_finalized() { + let mut util = UTHarness::new_one().await; + + let replica_prepare = util.new_current_replica_prepare(|_| {}); + util.dispatch_replica_prepare_one(replica_prepare.clone()) + .unwrap(); + + let mut leader_prepare = util + .recv_signed() + .await + .unwrap() + .cast::() + .unwrap() + .msg; + + let high_vote = util.rng().gen(); + leader_prepare.justification = util.new_prepare_qc(|msg| msg.high_vote = high_vote); + + let leader_prepare_signed = util + .owner_key() + .sign_msg(ConsensusMsg::LeaderPrepare(leader_prepare.clone())); + + let res = util.dispatch_leader_prepare(leader_prepare_signed).await; + assert_matches!( + res, + Err(LeaderPrepareError::ProposalWhenPreviousNotFinalized) + ); +} + +#[tokio::test] +async fn leader_prepare_proposal_invalid_parent_hash() { + let mut util = UTHarness::new_one().await; + + let replica_prepare_signed = util.new_current_replica_prepare(|_| {}); + let replica_prepare = replica_prepare_signed + .clone() + .cast::() + .unwrap() + .msg; + util.dispatch_replica_prepare_one(replica_prepare_signed.clone()) + .unwrap(); + let mut leader_prepare = util + .recv_signed() + .await + .unwrap() + .cast::() + .unwrap() + .msg; + + let junk: BlockHeaderHash = util.rng().gen(); + leader_prepare.proposal.parent = junk; + let leader_prepare_signed = util + .owner_key() + .sign_msg(ConsensusMsg::LeaderPrepare(leader_prepare.clone())); + + let res = util.dispatch_leader_prepare(leader_prepare_signed).await; + assert_matches!( + res, + Err(LeaderPrepareError::ProposalInvalidParentHash { + correct_parent_hash, + received_parent_hash, + header + }) => { + assert_eq!(correct_parent_hash, replica_prepare.high_vote.proposal.hash()); + assert_eq!(received_parent_hash, junk); + assert_eq!(header, leader_prepare.proposal); + } + ); +} + +#[tokio::test] +async fn leader_prepare_proposal_non_sequential_number() { + let mut util = UTHarness::new_one().await; + + let replica_prepare_signed = util.new_current_replica_prepare(|_| {}); + let replica_prepare = replica_prepare_signed + .clone() + .cast::() + .unwrap() + .msg; + util.dispatch_replica_prepare_one(replica_prepare_signed) + .unwrap(); + let mut leader_prepare = util + .recv_signed() + .await + .unwrap() + .cast::() + .unwrap() + .msg; + + let correct_num = replica_prepare.high_vote.proposal.number.next(); + assert_eq!(correct_num, leader_prepare.proposal.number); + + let non_seq_num = correct_num.next(); + leader_prepare.proposal.number = non_seq_num; + let leader_prepare_signed = util + .owner_key() + .sign_msg(ConsensusMsg::LeaderPrepare(leader_prepare.clone())); + + let res = util.dispatch_leader_prepare(leader_prepare_signed).await; + assert_matches!( + res, + Err(LeaderPrepareError::ProposalNonSequentialNumber { correct_number, received_number, header }) => { + assert_eq!(correct_number, correct_num); + assert_eq!(received_number, non_seq_num); + assert_eq!(header, leader_prepare.proposal); + } + ); +} + +#[tokio::test] +async fn leader_prepare_reproposal_without_quorum() { + let mut util = UTHarness::new_many().await; + + util.set_view(util.owner_as_view_leader()); + + let mut leader_prepare = util + .new_procedural_leader_prepare_many() + .await + .cast::() + .unwrap() + .msg; + + let rng = RefCell::new(util.new_rng()); + 
leader_prepare.justification = util.new_prepare_qc_many(&|msg: &mut ReplicaPrepare| { + let mut rng = rng.borrow_mut(); + msg.high_vote = rng.gen(); + }); + leader_prepare.proposal_payload = None; + + let leader_prepare = util + .owner_key() + .sign_msg(ConsensusMsg::LeaderPrepare(leader_prepare)); + let res = util.dispatch_leader_prepare(leader_prepare).await; + assert_matches!(res, Err(LeaderPrepareError::ReproposalWithoutQuorum)); +} + +#[tokio::test] +async fn leader_prepare_reproposal_when_finalized() { + let mut util = UTHarness::new_one().await; + + let mut leader_prepare = util + .new_procedural_leader_prepare_one() + .await + .cast::() + .unwrap() + .msg; + leader_prepare.proposal_payload = None; + let leader_prepare_signed = util + .owner_key() + .sign_msg(ConsensusMsg::LeaderPrepare(leader_prepare.clone())); + + let res = util.dispatch_leader_prepare(leader_prepare_signed).await; + assert_matches!(res, Err(LeaderPrepareError::ReproposalWhenFinalized)); +} + +#[tokio::test] +async fn leader_prepare_reproposal_invalid_block() { + let mut util = UTHarness::new_one().await; + + let mut leader_prepare: LeaderPrepare = util + .new_procedural_leader_prepare_one() + .await + .cast() + .unwrap() + .msg; + + let high_vote = util.rng().gen(); + leader_prepare.justification = util.new_prepare_qc(|msg: &mut ReplicaPrepare| { + msg.high_vote = high_vote; + }); + leader_prepare.proposal_payload = None; + + let leader_prepare = util + .owner_key() + .sign_msg(ConsensusMsg::LeaderPrepare(leader_prepare)); + + let res = util.dispatch_leader_prepare(leader_prepare).await; + assert_matches!(res, Err(LeaderPrepareError::ReproposalInvalidBlock)); +} + +#[tokio::test] +async fn leader_commit_sanity() { + let mut util = UTHarness::new_many().await; + + util.set_view(util.owner_as_view_leader()); + + let leader_commit = util.new_procedural_leader_commit_many().await; + util.dispatch_leader_commit(leader_commit).await.unwrap(); +} + +#[tokio::test] +async fn leader_commit_sanity_yield_replica_prepare() { + let mut util = UTHarness::new_one().await; + + let leader_commit = util.new_procedural_leader_commit_one().await; + util.dispatch_leader_commit(leader_commit.clone()) + .await + .unwrap(); + let replica_prepare = util + .recv_signed() + .await + .unwrap() + .cast::() + .unwrap() + .msg; + + let leader_commit = leader_commit.cast::().unwrap().msg; + assert_matches!( + replica_prepare, + ReplicaPrepare { + protocol_version, + view, + high_vote, + high_qc, + } => { + assert_eq!(protocol_version, leader_commit.protocol_version); + assert_eq!(view, leader_commit.justification.message.view.next()); + assert_eq!(high_vote, leader_commit.justification.message); + assert_eq!(high_qc, leader_commit.justification) + } + ); +} + +#[tokio::test] +async fn leader_commit_invalid_leader() { + let mut util = UTHarness::new_with(2).await; + + let current_view_leader = util.view_leader(util.current_replica_view()); + assert_ne!(current_view_leader, util.owner_key().public()); + + let leader_commit = util.new_rnd_leader_commit(|_| {}); + let res = util.dispatch_leader_commit(leader_commit).await; + assert_matches!(res, Err(LeaderCommitError::InvalidLeader { .. })); +} + +#[tokio::test] +async fn leader_commit_invalid_sig() { + let mut util = UTHarness::new_one().await; + + let mut leader_commit = util.new_rnd_leader_commit(|_| {}); + leader_commit.sig = util.rng().gen(); + let res = util.dispatch_leader_commit(leader_commit).await; + assert_matches!(res, Err(LeaderCommitError::InvalidSignature { .. 
}));
+}
+
+#[tokio::test]
+async fn leader_commit_invalid_commit_qc() {
+    let mut util = UTHarness::new_one().await;
-    let test_new_view_msg = ConsensusInputMessage {
-        message: consensus
-            .inner
-            .secret_key
-            .sign_msg(validator::ConsensusMsg::ReplicaPrepare(
-                validator::ReplicaPrepare {
-                    protocol_version: validator::ProtocolVersion::EARLIEST,
-                    view: consensus.replica.view,
-                    high_vote: consensus.replica.high_vote,
-                    high_qc: consensus.replica.high_qc.clone(),
-                },
-            )),
-        recipient: Target::Validator(consensus.inner.view_leader(consensus.replica.view)),
-    };
-
-    assert_eq!(pipe.recv(ctx).await.unwrap(), test_new_view_msg.into());
-    assert!(consensus.replica.timeout_deadline < time::Deadline::Infinite);
+    let leader_commit = util.new_rnd_leader_commit(|_| {});
+    let res = util.dispatch_leader_commit(leader_commit).await;
+    assert_matches!(res, Err(LeaderCommitError::InvalidJustification { .. }));
 }
diff --git a/node/actors/bft/src/testonly/mod.rs b/node/actors/bft/src/testonly/mod.rs
index ed47074d..32e48d8b 100644
--- a/node/actors/bft/src/testonly/mod.rs
+++ b/node/actors/bft/src/testonly/mod.rs
@@ -12,6 +12,8 @@ mod make;
 mod node;
 #[cfg(test)]
 mod run;
+#[cfg(test)]
+pub(crate) mod ut_harness;
 
 #[cfg(test)]
 pub(crate) use fuzz::*;
diff --git a/node/actors/bft/src/testonly/ut_harness.rs b/node/actors/bft/src/testonly/ut_harness.rs
new file mode 100644
index 00000000..fa05fd7c
--- /dev/null
+++ b/node/actors/bft/src/testonly/ut_harness.rs
@@ -0,0 +1,483 @@
+use crate::{
+    io::{InputMessage, OutputMessage},
+    leader::{ReplicaCommitError, ReplicaPrepareError},
+    replica::{LeaderCommitError, LeaderPrepareError},
+    Consensus,
+};
+use assert_matches::assert_matches;
+use rand::{rngs::StdRng, Rng};
+use zksync_concurrency::{
+    ctx,
+    ctx::{Canceled, Ctx},
+    scope,
+};
+use zksync_consensus_network::io::ConsensusInputMessage;
+use zksync_consensus_roles::validator::{
+    self, BlockHeader, CommitQC, ConsensusMsg, LeaderCommit, LeaderPrepare, Payload, Phase,
+    PrepareQC, ReplicaCommit, ReplicaPrepare, SecretKey, Signed, ViewNumber,
+};
+use zksync_consensus_utils::pipe::DispatcherPipe;
+
+/// `UTHarness` provides various utilities for unit tests.
+/// It is designed to simplify the setup and execution of test cases by encapsulating
+/// common testing functionality.
+///
+/// It should be instantiated once for every test case.
+#[cfg(test)]
+pub(crate) struct UTHarness {
+    ctx: Ctx,
+    rng: StdRng,
+    consensus: Consensus,
+    pipe: DispatcherPipe<InputMessage, OutputMessage>,
+    keys: Vec<SecretKey>,
+}
+
+impl UTHarness {
+    /// Creates a new `UTHarness` with one validator.
+    pub(crate) async fn new_one() -> UTHarness {
+        UTHarness::new_with(1).await
+    }
+
+    /// Creates a new `UTHarness` with the smallest validator set that tolerates at least one faulty replica.
+    pub(crate) async fn new_many() -> UTHarness {
+        let num_validators = 6;
+        assert_matches!(crate::misc::faulty_replicas(num_validators), res if res > 0);
+        UTHarness::new_with(num_validators).await
+    }
+
+    /// Creates a new `UTHarness` with the specified validator set size.
+    pub(crate) async fn new_with(num_validators: usize) -> UTHarness {
+        let ctx = ctx::test_root(&ctx::RealClock);
+        let mut rng = ctx.rng();
+        let keys: Vec<_> = (0..num_validators).map(|_| rng.gen()).collect();
+        let (genesis, val_set) = crate::testonly::make_genesis(
+            &keys,
+            validator::ProtocolVersion::EARLIEST,
+            Payload(vec![]),
+        );
+        let (mut consensus, pipe) =
+            crate::testonly::make_consensus(&ctx, &keys[0], &val_set, &genesis).await;
+
+        consensus.leader.view = ViewNumber(1);
+        consensus.replica.view = ViewNumber(1);
+
+        UTHarness {
+            ctx,
+            rng,
+            consensus,
+            pipe,
+            keys,
+        }
+    }
+
+    pub(crate) fn consensus_threshold(&self) -> usize {
+        crate::misc::consensus_threshold(self.keys.len())
+    }
+
+    pub(crate) fn owner_key(&self) -> &SecretKey {
+        &self.consensus.inner.secret_key
+    }
+
+    pub(crate) fn owner_as_view_leader(&self) -> ViewNumber {
+        let mut view = self.current_replica_view();
+        while self.view_leader(view) != self.owner_key().public() {
+            view = view.next();
+        }
+        view
+    }
+
+    pub(crate) fn key_at(&self, index: usize) -> &SecretKey {
+        &self.keys[index]
+    }
+
+    pub(crate) fn keys(&self) -> Vec<SecretKey> {
+        self.keys.clone()
+    }
+
+    pub(crate) fn rng(&mut self) -> &mut StdRng {
+        &mut self.rng
+    }
+
+    pub(crate) fn new_rng(&self) -> StdRng {
+        self.ctx.rng()
+    }
+
+    pub(crate) fn set_view(&mut self, view: ViewNumber) {
+        self.set_replica_view(view);
+        self.set_leader_view(view);
+    }
+
+    pub(crate) fn set_leader_view(&mut self, view: ViewNumber) {
+        self.consensus.leader.view = view
+    }
+
+    pub(crate) fn set_leader_phase(&mut self, phase: Phase) {
+        self.consensus.leader.phase = phase
+    }
+
+    pub(crate) fn set_replica_view(&mut self, view: ViewNumber) {
+        self.consensus.replica.view = view
+    }
+
+    pub(crate) fn new_unfinalized_replica_prepare(&self) -> Signed<ConsensusMsg> {
+        self.new_current_replica_prepare(|msg| {
+            let mut high_vote = ReplicaCommit {
+                protocol_version: validator::ProtocolVersion::EARLIEST,
+                view: self.consensus.replica.view.next(),
+                proposal: self.consensus.replica.high_qc.message.proposal,
+            };
+
+            high_vote.proposal.parent = high_vote.proposal.hash();
+            high_vote.proposal.number = high_vote.proposal.number.next();
+
+            msg.high_vote = high_vote;
+        })
+    }
+
+    pub(crate) fn new_current_replica_prepare(
+        &self,
+        mutate_fn: impl FnOnce(&mut ReplicaPrepare),
+    ) -> Signed<ConsensusMsg> {
+        let mut msg = ReplicaPrepare {
+            protocol_version: validator::ProtocolVersion::EARLIEST,
+            view: self.consensus.replica.view,
+            high_vote: self.consensus.replica.high_vote,
+            high_qc: self.consensus.replica.high_qc.clone(),
+        };
+
+        mutate_fn(&mut msg);
+
+        self.consensus
+            .inner
+            .secret_key
+            .sign_msg(ConsensusMsg::ReplicaPrepare(msg))
+    }
+
+    pub(crate) fn new_rnd_leader_prepare(
+        &mut self,
+        mutate_fn: impl FnOnce(&mut LeaderPrepare),
+    ) -> Signed<ConsensusMsg> {
+        let payload: Payload = self.rng().gen();
+        let mut msg = LeaderPrepare {
+            protocol_version: validator::ProtocolVersion::EARLIEST,
+            view: self.consensus.leader.view,
+            proposal: BlockHeader {
+                parent: self.consensus.replica.high_vote.proposal.hash(),
+                number: self.consensus.replica.high_vote.proposal.number.next(),
+                payload: payload.hash(),
+            },
+            proposal_payload: Some(payload),
+            justification: self.rng().gen(),
+        };
+
+        mutate_fn(&mut msg);
+
+        self.consensus
+            .inner
+            .secret_key
+            .sign_msg(ConsensusMsg::LeaderPrepare(msg))
+    }
+
+    pub(crate) fn new_current_replica_commit(
+        &self,
+        mutate_fn: impl FnOnce(&mut ReplicaCommit),
+    ) -> Signed<ConsensusMsg> {
+        let mut msg = ReplicaCommit {
+            protocol_version: validator::ProtocolVersion::EARLIEST,
+            view: self.consensus.replica.view,
+            proposal: self.consensus.replica.high_qc.message.proposal,
+        };
+
+        mutate_fn(&mut msg);
+
+        self.consensus
+            .inner
+            .secret_key
+            .sign_msg(ConsensusMsg::ReplicaCommit(msg))
+    }
+
+    pub(crate) fn new_rnd_leader_commit(
+        &mut self,
+        mutate_fn: impl FnOnce(&mut LeaderCommit),
+    ) -> Signed<ConsensusMsg> {
+        let mut msg = LeaderCommit {
+            protocol_version: validator::ProtocolVersion::EARLIEST,
+            justification: self.rng().gen(),
+        };
+
+        mutate_fn(&mut msg);
+
+        self.consensus
+            .inner
+            .secret_key
+            .sign_msg(ConsensusMsg::LeaderCommit(msg))
+    }
+
+    pub(crate) async fn new_procedural_leader_prepare_one(&mut self) -> Signed<ConsensusMsg> {
+        let replica_prepare = self.new_current_replica_prepare(|_| {});
+        self.dispatch_replica_prepare_one(replica_prepare.clone())
+            .unwrap();
+        self.recv_signed().await.unwrap()
+    }
+
+    pub(crate) async fn new_procedural_leader_prepare_many(&mut self) -> Signed<ConsensusMsg> {
+        let replica_prepare = self.new_current_replica_prepare(|_| {}).cast().unwrap().msg;
+        self.dispatch_replica_prepare_many(
+            vec![replica_prepare; self.consensus_threshold()],
+            self.keys(),
+        )
+        .unwrap();
+        self.recv_signed().await.unwrap()
+    }
+
+    pub(crate) async fn new_procedural_replica_commit_one(&mut self) -> Signed<ConsensusMsg> {
+        let replica_prepare = self.new_current_replica_prepare(|_| {});
+        self.dispatch_replica_prepare_one(replica_prepare.clone())
+            .unwrap();
+        let leader_prepare = self.recv_signed().await.unwrap();
+        self.dispatch_leader_prepare(leader_prepare).await.unwrap();
+        self.recv_signed().await.unwrap()
+    }
+
+    pub(crate) async fn new_procedural_replica_commit_many(&mut self) -> Signed<ConsensusMsg> {
+        let leader_prepare = self.new_procedural_leader_prepare_many().await;
+        self.dispatch_leader_prepare(leader_prepare).await.unwrap();
+        self.recv_signed().await.unwrap()
+    }
+
+    pub(crate) async fn new_procedural_leader_commit_one(&mut self) -> Signed<ConsensusMsg> {
+        let replica_prepare = self.new_current_replica_prepare(|_| {});
+        self.dispatch_replica_prepare_one(replica_prepare.clone())
+            .unwrap();
+        let leader_prepare = self.recv_signed().await.unwrap();
+        self.dispatch_leader_prepare(leader_prepare).await.unwrap();
+        let replica_commit = self.recv_signed().await.unwrap();
+        self.dispatch_replica_commit_one(replica_commit).unwrap();
+        self.recv_signed().await.unwrap()
+    }
+
+    pub(crate) async fn new_procedural_leader_commit_many(&mut self) -> Signed<ConsensusMsg> {
+        let replica_commit = self
+            .new_procedural_replica_commit_many()
+            .await
+            .cast()
+            .unwrap()
+            .msg;
+        self.dispatch_replica_commit_many(
+            vec![replica_commit; self.consensus_threshold()],
+            self.keys(),
+        )
+        .unwrap();
+        self.recv_signed().await.unwrap()
+    }
+
+    #[allow(clippy::result_large_err)]
+    pub(crate) fn dispatch_replica_prepare_one(
+        &mut self,
+        msg: Signed<ConsensusMsg>,
+    ) -> Result<(), ReplicaPrepareError> {
+        self.consensus.leader.process_replica_prepare(
+            &self.ctx,
+            &self.consensus.inner,
+            msg.cast().unwrap(),
+        )
+    }
+
+    #[allow(clippy::result_large_err)]
+    pub(crate) fn dispatch_replica_prepare_many(
+        &mut self,
+        messages: Vec<ReplicaPrepare>,
+        keys: Vec<SecretKey>,
+    ) -> Result<(), ReplicaPrepareError> {
+        let len = messages.len();
+        let consensus_threshold = self.consensus_threshold();
+        messages
+            .into_iter()
+            .zip(keys)
+            .map(|(msg, key)| {
+                let signed = key.sign_msg(ConsensusMsg::ReplicaPrepare(msg));
+                self.dispatch_replica_prepare_one(signed)
+            })
+            .fold((0, None), |(i, _), res| {
+                let i = i + 1;
+                if i < len {
+                    assert_matches!(
+                        res,
+                        Err(ReplicaPrepareError::NumReceivedBelowThreshold {
+                            num_messages,
+                            threshold,
+                        }) => {
+                            assert_eq!(num_messages, i);
+                            assert_eq!(threshold, consensus_threshold)
+                        }
+                    );
+                }
+                (i, Some(res))
+            })
+            .1
+            .unwrap()
+    }
+
+    pub(crate) fn dispatch_replica_commit_one(
+        &mut self,
+        msg: Signed<ConsensusMsg>,
+    ) -> Result<(), ReplicaCommitError> {
+        self.consensus.leader.process_replica_commit(
+            &self.ctx,
+            &self.consensus.inner,
+            msg.cast().unwrap(),
+        )
+    }
+
+    pub(crate) fn dispatch_replica_commit_many(
+        &mut self,
+        messages: Vec<ReplicaCommit>,
+        keys: Vec<SecretKey>,
+    ) -> Result<(), ReplicaCommitError> {
+        let len = messages.len();
+        let consensus_threshold = self.consensus_threshold();
+        messages
+            .into_iter()
+            .zip(keys)
+            .map(|(msg, key)| {
+                let signed = key.sign_msg(ConsensusMsg::ReplicaCommit(msg));
+                self.dispatch_replica_commit_one(signed)
+            })
+            .fold((0, None), |(i, _), res| {
+                let i = i + 1;
+                if i < len {
+                    assert_matches!(
+                        res,
+                        Err(ReplicaCommitError::NumReceivedBelowThreshold {
+                            num_messages,
+                            threshold,
+                        }) => {
+                            assert_eq!(num_messages, i);
+                            assert_eq!(threshold, consensus_threshold)
+                        }
+                    );
+                }
+                (i, Some(res))
+            })
+            .1
+            .unwrap()
+    }
+
+    pub(crate) async fn dispatch_leader_prepare(
+        &mut self,
+        msg: Signed<ConsensusMsg>,
+    ) -> Result<(), LeaderPrepareError> {
+        scope::run!(&self.ctx, |ctx, s| {
+            s.spawn(async {
+                let res = self
+                    .consensus
+                    .replica
+                    .process_leader_prepare(ctx, &self.consensus.inner, msg.cast().unwrap())
+                    .await;
+                Ok(res)
+            })
+            .join(ctx)
+        })
+        .await
+        .unwrap()
+    }
+
+    pub(crate) async fn dispatch_leader_commit(
+        &mut self,
+        msg: Signed<ConsensusMsg>,
+    ) -> Result<(), LeaderCommitError> {
+        scope::run!(&self.ctx, |ctx, s| {
+            s.spawn(async {
+                let res = self
+                    .consensus
+                    .replica
+                    .process_leader_commit(ctx, &self.consensus.inner, msg.cast().unwrap())
+                    .await;
+                Ok(res)
+            })
+            .join(ctx)
+        })
+        .await
+        .unwrap()
+    }
+
+    pub(crate) async fn recv_signed(&mut self) -> Result<Signed<ConsensusMsg>, Canceled> {
+        self.pipe
+            .recv(&self.ctx)
+            .await
+            .map(|output_message| match output_message {
+                OutputMessage::Network(ConsensusInputMessage {
+                    message: signed, ..
+                }) => signed,
+            })
+    }
+
+    pub(crate) fn current_replica_view(&self) -> ViewNumber {
+        self.consensus.replica.view
+    }
+
+    pub(crate) fn current_replica_phase(&self) -> Phase {
+        self.consensus.replica.phase
+    }
+
+    pub(crate) fn view_leader(&self, view: ViewNumber) -> validator::PublicKey {
+        self.consensus.inner.view_leader(view)
+    }
+
+    pub(crate) fn new_commit_qc(&self, mutate_fn: impl FnOnce(&mut ReplicaCommit)) -> CommitQC {
+        let validator_set =
+            validator::ValidatorSet::new(self.keys.iter().map(|k| k.public())).unwrap();
+
+        let msg = self
+            .new_current_replica_commit(mutate_fn)
+            .cast()
+            .unwrap()
+            .msg;
+
+        let signed_messages: Vec<_> = self.keys.iter().map(|sk| sk.sign_msg(msg)).collect();
+
+        CommitQC::from(&signed_messages, &validator_set).unwrap()
+    }
+
+    pub(crate) fn new_prepare_qc(&self, mutate_fn: impl FnOnce(&mut ReplicaPrepare)) -> PrepareQC {
+        let validator_set =
+            validator::ValidatorSet::new(self.keys.iter().map(|k| k.public())).unwrap();
+
+        let msg: ReplicaPrepare = self
+            .new_current_replica_prepare(mutate_fn)
+            .cast()
+            .unwrap()
+            .msg;
+
+        let signed_messages: Vec<_> = self
+            .keys
+            .iter()
+            .map(|sk| sk.sign_msg(msg.clone()))
+            .collect();
+
+        PrepareQC::from(&signed_messages, &validator_set).unwrap()
+    }
+
+    pub(crate) fn new_prepare_qc_many(
+        &mut self,
+        mutate_fn: &dyn Fn(&mut ReplicaPrepare),
+    ) -> PrepareQC {
+        let validator_set =
+            validator::ValidatorSet::new(self.keys.iter().map(|k| k.public())).unwrap();
+
+        let signed_messages: Vec<_> = self
+            .keys
+            .iter()
+            .map(|sk| {
+                let msg: ReplicaPrepare = self
+                    .new_current_replica_prepare(|msg| mutate_fn(msg))
+                    .cast()
+                    .unwrap()
+                    .msg;
+                sk.sign_msg(msg.clone())
+            })
+            .collect();
+
+        PrepareQC::from(&signed_messages, &validator_set).unwrap()
+    }
+}
diff --git a/node/actors/bft/src/tests.rs b/node/actors/bft/src/tests.rs
index a7f65479..e4685997 100644
--- a/node/actors/bft/src/tests.rs
+++ b/node/actors/bft/src/tests.rs
@@ -27,30 +27,37 @@ async fn run_test(behavior: Behavior, network: Network) {
 async fn honest_mock_network() {
     run_test(Behavior::Honest, Network::Mock).await
 }
+
 #[tokio::test(flavor = "multi_thread")]
 async fn honest_real_network() {
     run_test(Behavior::Honest, Network::Real).await
 }
+
 #[tokio::test(flavor = "multi_thread")]
 async fn offline_mock_network() {
     run_test(Behavior::Offline, Network::Mock).await
 }
+
 #[tokio::test(flavor = "multi_thread")]
 async fn offline_real_network() {
     run_test(Behavior::Offline, Network::Real).await
 }
+
 #[tokio::test(flavor = "multi_thread")]
 async fn random_mock_network() {
     run_test(Behavior::Random, Network::Mock).await
 }
+
 #[tokio::test(flavor = "multi_thread")]
 async fn random_real_network() {
     run_test(Behavior::Random, Network::Real).await
 }
+
 #[tokio::test(flavor = "multi_thread")]
 async fn byzantine_mock_network() {
     run_test(Behavior::Byzantine, Network::Mock).await
 }
+
 #[tokio::test(flavor = "multi_thread")]
 async fn byzantine_real_network() {
     run_test(Behavior::Byzantine, Network::Real).await
diff --git a/node/deny.toml b/node/deny.toml
index e7d3b9cd..f59f2fe8 100644
--- a/node/deny.toml
+++ b/node/deny.toml
@@ -60,9 +60,6 @@ skip = [
     # Old versions required by pairing_ce & ff_ce.
     { name = "rand", version = "0.4" },
    { name = "syn", version = "1.0" },
-
-    # Old versions required by criterion.
- { name = "itertools", version = "0.10.5" } ] [sources] diff --git a/node/libs/roles/src/validator/messages/consensus.rs b/node/libs/roles/src/validator/messages/consensus.rs index 71619b89..36e9b60c 100644 --- a/node/libs/roles/src/validator/messages/consensus.rs +++ b/node/libs/roles/src/validator/messages/consensus.rs @@ -11,7 +11,7 @@ use zksync_consensus_utils::enum_util::{BadVariantError, Variant}; /// It allows to prevent misinterpretation of messages signed by validators /// using different versions of the binaries. #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] -pub struct ProtocolVersion(pub(crate) u32); +pub struct ProtocolVersion(pub u32); impl ProtocolVersion { /// Earliest protocol version.